// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
        crypto_completion_t complete;
        void *data;
        u8 *result;
        u32 flags;
        void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
        return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
                            halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int alignmask = walk->alignmask;
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        if (walk->flags & CRYPTO_ALG_ASYNC)
                walk->data = kmap(walk->pg);
        else
                walk->data = kmap_atomic(walk->pg);
        walk->data += offset;

        if (offset & alignmask) {
                unsigned int unaligned = alignmask + 1 - (offset & alignmask);

                if (nbytes > unaligned)
                        nbytes = unaligned;
        }

        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        unsigned int alignmask = walk->alignmask;

        walk->data -= walk->offset;

        if (walk->entrylen && (walk->offset & alignmask) && !err) {
                unsigned int nbytes;

                walk->offset = ALIGN(walk->offset, alignmask + 1);
                nbytes = min(walk->entrylen,
                             (unsigned int)(PAGE_SIZE - walk->offset));
                if (nbytes) {
                        walk->entrylen -= nbytes;
                        walk->data += walk->offset;
                        return nbytes;
                }
        }

        if (walk->flags & CRYPTO_ALG_ASYNC)
                kunmap(walk->pg);
        else {
                kunmap_atomic(walk->data);
                /*
                 * The "may sleep" test only makes sense for sync users;
                 * async users don't need to sleep here anyway.
                 */
                crypto_yield(walk->flags);
        }

        if (err)
                return err;

        if (walk->entrylen) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
                            struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
        walk->sg = req->src;
        walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
        walk->flags |= CRYPTO_ALG_ASYNC;

        BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
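
/*
 * Illustrative sketch: a typical consumer drains a request with the walk
 * API like this (this is essentially what shash_ahash_update() does in
 * later kernels), feeding each mapped chunk to the underlying hash and
 * passing the running byte count or error code back to
 * crypto_hash_walk_done():
 *
 *      struct crypto_hash_walk walk;
 *      int nbytes;
 *
 *      for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *           nbytes = crypto_hash_walk_done(&walk, nbytes))
 *              nbytes = crypto_shash_update(desc, walk.data, nbytes);
 *
 *      return nbytes;
 */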

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                  unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int ret;
        u8 *buffer, *alignbuffer;
        unsigned long absize;

        absize = keylen + alignmask;
        buffer = kmalloc(absize, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
        memcpy(alignbuffer, key, keylen);
        ret = tfm->setkey(tfm, alignbuffer, keylen);
        kzfree(buffer);
        return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
        const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

        if (tfm->setkey != ahash_nosetkey &&
            !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        int err;

        if ((unsigned long)key & alignmask)
                err = ahash_setkey_unaligned(tfm, key, keylen);
        else
                err = tfm->setkey(tfm, key, keylen);

        if (unlikely(err)) {
                ahash_set_needkey(tfm);
                return err;
        }

        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
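
/*
 * Illustrative sketch: keyed hashes (e.g. "hmac(sha256)") must be keyed
 * before use, otherwise the CRYPTO_TFM_NEED_KEY flag set above keeps
 * digest operations failing with -ENOKEY:
 *
 *      struct crypto_ahash *tfm;
 *
 *      tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      err = crypto_ahash_setkey(tfm, key, keylen);
 */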

static inline unsigned int ahash_align_buffer_size(unsigned len,
                                                   unsigned long mask)
{
        return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request_priv *priv;

        priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
                       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                       GFP_KERNEL : GFP_ATOMIC);
        if (!priv)
                return -ENOMEM;

        /*
         * WARNING: Voodoo programming below!
         *
         * The code below is obscure and hard to understand, thus explanation
         * is necessary. See include/crypto/hash.h and include/linux/crypto.h
         * to understand the layout of structures used here!
         *
         * The code here will replace portions of the ORIGINAL request with
         * pointers to new code and buffers so the hashing operation can store
         * the result in an aligned buffer. We will call the modified request
         * an ADJUSTED request.
         *
         * The newly mangled request will look like this:
         *
         * req {
         *     .result        = ADJUSTED[new aligned buffer]
         *     .base.complete = ADJUSTED[pointer to completion function]
         *     .base.data     = ADJUSTED[*req (pointer to self)]
         *     .priv          = ADJUSTED[new priv] {
         *         .result   = ORIGINAL(result)
         *         .complete = ORIGINAL(base.complete)
         *         .data     = ORIGINAL(base.data)
         *     }
         * }
         */

        priv->result = req->result;
        priv->complete = req->base.complete;
        priv->data = req->base.data;
        priv->flags = req->base.flags;

        /*
         * WARNING: We do not back up req->priv here! The req->priv
         * is for internal use of the Crypto API and the
         * user must _NOT_ _EVER_ depend on its content!
         */

        req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
        req->base.complete = cplt;
        req->base.data = req;
        req->priv = priv;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request_priv *priv = req->priv;

        if (!err)
                memcpy(priv->result, req->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        /* Restore the original crypto request. */
        req->result = priv->result;

        ahash_request_set_callback(req, priv->flags,
                                   priv->complete, priv->data);
        req->priv = NULL;

        /* Free the req->priv.priv from the ADJUSTED request. */
        kzfree(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
        struct ahash_request_priv *priv = req->priv;
        struct crypto_async_request oreq;

        oreq.data = priv->data;

        priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        /*
         * Restore the original request, see ahash_op_unaligned() for what
         * goes where.
         *
         * The "struct ahash_request *req" here is in fact the "req.base"
         * from the ADJUSTED request from ahash_op_unaligned(); since it
         * is a pointer to self, it is also the ADJUSTED "req".
         */

        /* First copy req->result into req->priv.result */
        ahash_restore_req(areq, err);

        /* Complete the ORIGINAL request. */
        areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
                              int (*op)(struct ahash_request *))
{
        int err;

        err = ahash_save_req(req, ahash_op_unaligned_done);
        if (err)
                return err;

        err = op(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        ahash_restore_req(req, err);

        return err;
}

static int crypto_ahash_op(struct ahash_request *req,
                           int (*op)(struct ahash_request *))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned long alignmask = crypto_ahash_alignmask(tfm);

        if ((unsigned long)req->result & alignmask)
                return ahash_op_unaligned(req, op);

        return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct crypto_alg *alg = tfm->base.__crt_alg;
        unsigned int nbytes = req->nbytes;
        int ret;

        crypto_stats_get(alg);
        ret = crypto_ahash_op(req, tfm->final);
        crypto_stats_ahash_final(nbytes, ret, alg);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct crypto_alg *alg = tfm->base.__crt_alg;
        unsigned int nbytes = req->nbytes;
        int ret;

        crypto_stats_get(alg);
        ret = crypto_ahash_op(req, tfm->finup);
        crypto_stats_ahash_final(nbytes, ret, alg);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct crypto_alg *alg = tfm->base.__crt_alg;
        unsigned int nbytes = req->nbytes;
        int ret;

        crypto_stats_get(alg);
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                ret = -ENOKEY;
        else
                ret = crypto_ahash_op(req, tfm->digest);
        crypto_stats_ahash_final(nbytes, ret, alg);
        return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
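
/*
 * Illustrative sketch, assuming caller-provided data in a scatterlist: a
 * one-shot digest with completion via callback.  The callback and the
 * -EINPROGRESS/-EBUSY convention are exactly what the functions above
 * implement; "my_done" is a hypothetical completion handler.
 *
 *      static void my_done(struct crypto_async_request *areq, int err)
 *      {
 *              if (err == -EINPROGRESS)
 *                      return;
 *              // consume req->result here
 *      }
 *
 *      req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *                                 my_done, req);
 *      ahash_request_set_crypt(req, sg, result, nbytes);
 *      err = crypto_ahash_digest(req);
 *      if (err == -EINPROGRESS || err == -EBUSY)
 *              return 0;       // my_done() will run later
 */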

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        if (err)
                goto out;

        req->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_reqtfm(req)->final(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
        struct ahash_request *areq = req->data;

        if (err == -EINPROGRESS) {
                ahash_notify_einprogress(areq);
                return;
        }

        areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        err = ahash_def_finup_finish1(areq, err);
        if (areq->priv)
                return;

        areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1);
        if (err)
                return err;

        err = tfm->update(req);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        return ahash_def_finup_finish1(req, err);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        hash->setkey = ahash_nosetkey;

        if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
                return crypto_init_shash_ops_async(tfm);

        hash->init = alg->init;
        hash->update = alg->update;
        hash->final = alg->final;
        hash->finup = alg->finup ?: ahash_def_finup;
        hash->digest = alg->digest;
        hash->export = alg->export;
        hash->import = alg->import;

        if (alg->setkey) {
                hash->setkey = alg->setkey;
                ahash_set_needkey(hash);
        }

        return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type != &crypto_ahash_type)
                return sizeof(struct crypto_shash *);

        return crypto_alg_extsize(alg);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        memset(&rhash, 0, sizeof(rhash));

        strscpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
        return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
        .report = crypto_ahash_report,
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;

        if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
            alg->halg.statesize > HASH_MAX_STATESIZE ||
            alg->halg.statesize == 0)
                return -EINVAL;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
        return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_ahash(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
        crypto_drop_spawn(crypto_instance_ctx(inst));
        kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
                            struct hash_alg_common *alg,
                            struct crypto_instance *inst)
{
        return crypto_init_spawn2(&spawn->base, &alg->base, inst,
                                  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
        struct crypto_alg *alg;

        alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
        return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
        struct crypto_alg *alg = &halg->base;

        if (alg->cra_type != &crypto_ahash_type)
                return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

        return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the implementation of the ahash (asynchronous hash) API.  It
 * differs from shash (synchronous hash) in that ahash supports asynchronous
 * operations, and it hashes data from scatterlists instead of virtually
 * addressed buffers.
 *
 * The ahash API provides access to both ahash and shash algorithms.  The
 * shash API only provides access to shash algorithms.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "hash.h"

#define CRYPTO_ALG_TYPE_AHASH_MASK      0x0000000e

static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg)
{
        return hash_get_stat(&alg->halg);
}

static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err)
{
        if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
                return err;

        if (err && err != -EINPROGRESS && err != -EBUSY)
                atomic64_inc(&ahash_get_stat(alg)->err_cnt);

        return err;
}

/*
 * For an ahash tfm that is using an shash algorithm (instead of an ahash
 * algorithm), this returns the underlying shash tfm.
 */
static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
{
        return *(struct crypto_shash **)crypto_ahash_ctx(tfm);
}

static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
                                                    struct crypto_ahash *tfm)
{
        struct shash_desc *desc = ahash_request_ctx(req);

        desc->tfm = ahash_to_shash(tfm);
        return desc;
}

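/*
 * Note on the loop below: crypto_shash_update() returns 0 or a negative
 * errno, and crypto_hash_walk_done() treats a negative value as an error
 * to unwind on, so the byte count and the error code deliberately share
 * one variable.
 */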
int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
        struct crypto_hash_walk walk;
        int nbytes;

        for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
             nbytes = crypto_hash_walk_done(&walk, nbytes))
                nbytes = crypto_shash_update(desc, walk.data, nbytes);

        return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_update);

int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
        struct crypto_hash_walk walk;
        int nbytes;

        nbytes = crypto_hash_walk_first(req, &walk);
        if (!nbytes)
                return crypto_shash_final(desc, req->result);

        do {
                nbytes = crypto_hash_walk_last(&walk) ?
                         crypto_shash_finup(desc, walk.data, nbytes,
                                            req->result) :
                         crypto_shash_update(desc, walk.data, nbytes);
                nbytes = crypto_hash_walk_done(&walk, nbytes);
        } while (nbytes > 0);

        return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);

int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
        unsigned int nbytes = req->nbytes;
        struct scatterlist *sg;
        unsigned int offset;
        int err;

        if (nbytes &&
            (sg = req->src, offset = sg->offset,
             nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
                void *data;

                data = kmap_local_page(sg_page(sg));
                err = crypto_shash_digest(desc, data + offset, nbytes,
                                          req->result);
                kunmap_local(data);
        } else
                err = crypto_shash_init(desc) ?:
                      shash_ahash_finup(req, desc);

        return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);
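
/*
 * The single-mapping fast path above applies only when the whole input
 * lives in the first scatterlist entry and does not cross a page
 * boundary; everything else falls back to the init + finup walk.
 */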

static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
{
        struct crypto_shash **ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(*ctx);
}

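/*
 * Wrap an shash algorithm in an ahash tfm: the tfm context holds just a
 * pointer to the underlying shash tfm, and the request context is sized
 * to host the shash_desc that prepare_shash_desc() fills in per request.
 */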
static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
        struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *shash;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        shash = crypto_create_tfm(calg, &crypto_shash_type);
        if (IS_ERR(shash)) {
                crypto_mod_put(calg);
                return PTR_ERR(shash);
        }

        crt->using_shash = true;
        *ctx = shash;
        tfm->exit = crypto_exit_ahash_using_shash;

        crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
                                    CRYPTO_TFM_NEED_KEY);
        crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);

        return 0;
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
        unsigned int offset = walk->offset;
        unsigned int nbytes = min(walk->entrylen,
                                  ((unsigned int)(PAGE_SIZE)) - offset);

        walk->data = kmap_local_page(walk->pg);
        walk->data += offset;
        walk->entrylen -= nbytes;
        return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
        struct scatterlist *sg;

        sg = walk->sg;
        walk->offset = sg->offset;
        walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
        walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;

        if (walk->entrylen > walk->total)
                walk->entrylen = walk->total;
        walk->total -= walk->entrylen;

        return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
        walk->data -= walk->offset;

        kunmap_local(walk->data);
        crypto_yield(walk->flags);

        if (err)
                return err;

        if (walk->entrylen) {
                walk->offset = 0;
                walk->pg++;
                return hash_walk_next(walk);
        }

        if (!walk->total)
                return 0;

        walk->sg = sg_next(walk->sg);

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk)
{
        walk->total = req->nbytes;

        if (!walk->total) {
                walk->entrylen = 0;
                return 0;
        }

        walk->sg = req->src;
        walk->flags = req->base.flags;

        return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int keylen)
{
        return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
{
        if (alg->setkey != ahash_nosetkey &&
            !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
                crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                        unsigned int keylen)
{
        if (likely(tfm->using_shash)) {
                struct crypto_shash *shash = ahash_to_shash(tfm);
                int err;

                err = crypto_shash_setkey(shash, key, keylen);
                if (unlikely(err)) {
                        crypto_ahash_set_flags(tfm,
                                               crypto_shash_get_flags(shash) &
                                               CRYPTO_TFM_NEED_KEY);
                        return err;
                }
        } else {
                struct ahash_alg *alg = crypto_ahash_alg(tfm);
                int err;

                err = alg->setkey(tfm, key, keylen);
                if (unlikely(err)) {
                        ahash_set_needkey(tfm, alg);
                        return err;
                }
        }
        crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
        return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

int crypto_ahash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (likely(tfm->using_shash))
                return crypto_shash_init(prepare_shash_desc(req, tfm));
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;
        return crypto_ahash_alg(tfm)->init(req);
}
EXPORT_SYMBOL_GPL(crypto_ahash_init);

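/*
 * ahash_save_req() stands in for the old "voodoo" request mangling: it
 * allocates a complete sub-request (with its own result buffer, and
 * optionally a copy of the hash state exported from the original request),
 * points req->priv at it, and lets ahash_restore_req() copy the digest
 * back and free it when the operation finishes.
 */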
static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
                          bool has_state)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        unsigned int ds = crypto_ahash_digestsize(tfm);
        struct ahash_request *subreq;
        unsigned int subreq_size;
        unsigned int reqsize;
        u8 *result;
        gfp_t gfp;
        u32 flags;

        subreq_size = sizeof(*subreq);
        reqsize = crypto_ahash_reqsize(tfm);
        reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
        subreq_size += reqsize;
        subreq_size += ds;

        flags = ahash_request_flags(req);
        gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
        subreq = kmalloc(subreq_size, gfp);
        if (!subreq)
                return -ENOMEM;

        ahash_request_set_tfm(subreq, tfm);
        ahash_request_set_callback(subreq, flags, cplt, req);

        result = (u8 *)(subreq + 1) + reqsize;

        ahash_request_set_crypt(subreq, req->src, result, req->nbytes);

        if (has_state) {
                void *state;

                state = kmalloc(crypto_ahash_statesize(tfm), gfp);
                if (!state) {
                        kfree(subreq);
                        return -ENOMEM;
                }

                crypto_ahash_export(req, state);
                crypto_ahash_import(subreq, state);
                kfree_sensitive(state);
        }

        req->priv = subreq;

        return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
        struct ahash_request *subreq = req->priv;

        if (!err)
                memcpy(req->result, subreq->result,
                       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

        req->priv = NULL;

        kfree_sensitive(subreq);
}

int crypto_ahash_update(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ahash_alg *alg;

        if (likely(tfm->using_shash))
                return shash_ahash_update(req, ahash_request_ctx(req));

        alg = crypto_ahash_alg(tfm);
        if (IS_ENABLED(CONFIG_CRYPTO_STATS))
                atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen);
        return crypto_ahash_errstat(alg, alg->update(req));
}
EXPORT_SYMBOL_GPL(crypto_ahash_update);

int crypto_ahash_final(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ahash_alg *alg;

        if (likely(tfm->using_shash))
                return crypto_shash_final(ahash_request_ctx(req), req->result);

        alg = crypto_ahash_alg(tfm);
        if (IS_ENABLED(CONFIG_CRYPTO_STATS))
                atomic64_inc(&ahash_get_stat(alg)->hash_cnt);
        return crypto_ahash_errstat(alg, alg->final(req));
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ahash_alg *alg;

        if (likely(tfm->using_shash))
                return shash_ahash_finup(req, ahash_request_ctx(req));

        alg = crypto_ahash_alg(tfm);
        if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
                struct crypto_istat_hash *istat = ahash_get_stat(alg);

                atomic64_inc(&istat->hash_cnt);
                atomic64_add(req->nbytes, &istat->hash_tlen);
        }
        return crypto_ahash_errstat(alg, alg->finup(req));
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct ahash_alg *alg;
        int err;

        if (likely(tfm->using_shash))
                return shash_ahash_digest(req, prepare_shash_desc(req, tfm));

        alg = crypto_ahash_alg(tfm);
        if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
                struct crypto_istat_hash *istat = ahash_get_stat(alg);

                atomic64_inc(&istat->hash_cnt);
                atomic64_add(req->nbytes, &istat->hash_tlen);
        }

        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                err = -ENOKEY;
        else
                err = alg->digest(req);

        return crypto_ahash_errstat(alg, err);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
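
/*
 * Illustrative sketch, assuming data already in a scatterlist: a
 * synchronous caller can pair the async API above with the crypto_wait
 * helpers instead of writing its own callback.
 *
 *      DECLARE_CRYPTO_WAIT(wait);
 *      struct ahash_request *req;
 *
 *      req = ahash_request_alloc(tfm, GFP_KERNEL);
 *      if (!req)
 *              return -ENOMEM;
 *      ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *                                 CRYPTO_TFM_REQ_MAY_SLEEP,
 *                                 crypto_req_done, &wait);
 *      ahash_request_set_crypt(req, sg, digest, nbytes);
 *      err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *      ahash_request_free(req);
 */

/*
 * The ahash_def_finup* helpers below provide the default finup for
 * drivers that only implement update and final: the request is cloned
 * with ahash_save_req() (including the exported hash state), update runs
 * on the clone, and final is chained from the completion path, so the
 * caller's request survives even if both steps complete asynchronously.
 */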

static void ahash_def_finup_done2(void *data, int err)
{
        struct ahash_request *areq = data;

        if (err == -EINPROGRESS)
                return;

        ahash_restore_req(areq, err);

        ahash_request_complete(areq, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
        struct ahash_request *subreq = req->priv;

        if (err)
                goto out;

        subreq->base.complete = ahash_def_finup_done2;

        err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

out:
        ahash_restore_req(req, err);
        return err;
}

static void ahash_def_finup_done1(void *data, int err)
{
        struct ahash_request *areq = data;
        struct ahash_request *subreq;

        if (err == -EINPROGRESS)
                goto out;

        subreq = areq->priv;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

        err = ahash_def_finup_finish1(areq, err);
        if (err == -EINPROGRESS || err == -EBUSY)
                return;

out:
        ahash_request_complete(areq, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        int err;

        err = ahash_save_req(req, ahash_def_finup_done1, true);
        if (err)
                return err;

        err = crypto_ahash_alg(tfm)->update(req->priv);
        if (err == -EINPROGRESS || err == -EBUSY)
                return err;

        return ahash_def_finup_finish1(req, err);
}

int crypto_ahash_export(struct ahash_request *req, void *out)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (likely(tfm->using_shash))
                return crypto_shash_export(ahash_request_ctx(req), out);
        return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);

int crypto_ahash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

        if (likely(tfm->using_shash))
                return crypto_shash_import(prepare_shash_desc(req, tfm), in);
        if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
                return -ENOKEY;
        return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
        struct ahash_alg *alg = crypto_ahash_alg(hash);

        crypto_ahash_set_statesize(hash, alg->halg.statesize);

        if (tfm->__crt_alg->cra_type == &crypto_shash_type)
                return crypto_init_ahash_using_shash(tfm);

        ahash_set_needkey(hash, alg);

        if (alg->exit_tfm)
                tfm->exit = crypto_ahash_exit_tfm;

        return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
        if (alg->cra_type == &crypto_shash_type)
                return sizeof(struct crypto_shash *);

        return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
        struct ahash_instance *ahash = ahash_instance(inst);

        ahash->free(ahash);
}

static int __maybe_unused crypto_ahash_report(
        struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_hash rhash;

        memset(&rhash, 0, sizeof(rhash));

        strscpy(rhash.type, "ahash", sizeof(rhash.type));

        rhash.blocksize = alg->cra_blocksize;
        rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

        return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_printf(m, "type         : ahash\n");
        seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
                                             "yes" : "no");
        seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
        seq_printf(m, "digestsize   : %u\n",
                   __crypto_hash_alg_common(alg)->digestsize);
}

static int __maybe_unused crypto_ahash_report_stat(
        struct sk_buff *skb, struct crypto_alg *alg)
{
        return crypto_hash_report_stat(skb, alg, "ahash");
}

static const struct crypto_type crypto_ahash_type = {
        .extsize = crypto_ahash_extsize,
        .init_tfm = crypto_ahash_init_tfm,
        .free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
        .show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_ahash_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
        .report_stat = crypto_ahash_report_stat,
#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
        .type = CRYPTO_ALG_TYPE_AHASH,
        .tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
                      struct crypto_instance *inst,
                      const char *name, u32 type, u32 mask)
{
        spawn->base.frontend = &crypto_ahash_type;
        return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
                                        u32 mask)
{
        return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
        return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

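/*
 * Clone semantics: an unkeyed hash tfm is stateless apart from its
 * algorithm, so cloning it only needs a reference count bump; a keyed tfm
 * must clone the underlying shash or call the algorithm's clone_tfm hook
 * so the copy carries the key with it.
 */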
struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
        struct hash_alg_common *halg = crypto_hash_alg_common(hash);
        struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
        struct crypto_ahash *nhash;
        struct ahash_alg *alg;
        int err;

        if (!crypto_hash_alg_has_setkey(halg)) {
                tfm = crypto_tfm_get(tfm);
                if (IS_ERR(tfm))
                        return ERR_CAST(tfm);

                return hash;
        }

        nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

        if (IS_ERR(nhash))
                return nhash;

        nhash->reqsize = hash->reqsize;
        nhash->statesize = hash->statesize;

        if (likely(hash->using_shash)) {
                struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
                struct crypto_shash *shash;

                shash = crypto_clone_shash(ahash_to_shash(hash));
                if (IS_ERR(shash)) {
                        err = PTR_ERR(shash);
                        goto out_free_nhash;
                }
                nhash->using_shash = true;
                *nctx = shash;
                return nhash;
        }

        err = -ENOSYS;
        alg = crypto_ahash_alg(hash);
        if (!alg->clone_tfm)
                goto out_free_nhash;

        err = alg->clone_tfm(nhash, hash);
        if (err)
                goto out_free_nhash;

        return nhash;

out_free_nhash:
        crypto_free_ahash(nhash);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        if (alg->halg.statesize == 0)
                return -EINVAL;

        err = hash_prepare_alg(&alg->halg);
        if (err)
                return err;

        base->cra_type = &crypto_ahash_type;
        base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

        if (!alg->finup)
                alg->finup = ahash_def_finup;
        if (!alg->setkey)
                alg->setkey = ahash_nosetkey;

        return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
        struct crypto_alg *base = &alg->halg.base;
        int err;

        err = ahash_prepare_alg(alg);
        if (err)
                return err;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
        crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_ahash(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
                            struct ahash_instance *inst)
{
        int err;

        if (WARN_ON(!inst->free))
                return -EINVAL;

        err = ahash_prepare_alg(&inst->alg);
        if (err)
                return err;

        return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
        struct crypto_alg *alg = &halg->base;

        if (alg->cra_type == &crypto_shash_type)
                return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

        return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");