// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

static const struct crypto_type crypto_ahash_type;

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
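
/*
 * Usage sketch (kept out of the build): a driver's ->update()
 * implementation typically consumes req->src with the walk API above.
 * my_update() and my_process() are hypothetical names; only the loop
 * shape is the point here.
 */
#if 0
static int my_update(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	/* Each iteration yields one mapped, alignment-trimmed chunk. */
	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0))
		my_process(walk.data, nbytes);

	/* A negative value from the walk propagates the error code. */
	return nbytes;
}
#endif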

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
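
/*
 * Caller-side sketch (kept out of the build): a keyed algorithm such as
 * "hmac(sha256)" must be keyed before digesting; until then the
 * CRYPTO_TFM_NEED_KEY flag is set and crypto_ahash_digest() below fails
 * with -ENOKEY. "tfm", "key" and "keylen" are assumed caller-supplied.
 */
#if 0
	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err)
		return err;	/* the tfm is re-marked NEED_KEY on failure */
#endif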

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *	.result		= ADJUSTED[new aligned buffer]
	 *	.base.complete	= ADJUSTED[pointer to completion function]
	 *	.base.data	= ADJUSTED[*req (pointer to self)]
	 *	.priv		= ADJUSTED[new priv] {
	 *		.result		= ORIGINAL(result)
	 *		.complete	= ORIGINAL(base.complete)
	 *		.data		= ORIGINAL(base.data)
	 *	}
	 * }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 * is for internal use of the Crypto API and the
	 * user must _NOT_ _EVER_ depend on its contents!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kfree_sensitive(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
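
/*
 * End-to-end sketch (kept out of the build): computing a single digest
 * synchronously from the caller's side. "sha256" and my_compute_digest()
 * are illustrative; DECLARE_CRYPTO_WAIT, crypto_req_done and
 * crypto_wait_req() come from <linux/crypto.h>.
 */
#if 0
static int my_compute_digest(struct scatterlist *sg, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleeping wait. */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
#endif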

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}
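
/*
 * For reference, the default finup implemented above chains as follows
 * (all names are from this file):
 *
 *	ahash_def_finup()
 *	  ahash_save_req(req, ahash_def_finup_done1)
 *	  tfm->update(req)  --async-->  ahash_def_finup_done1()
 *	  ahash_def_finup_finish1()
 *	    tfm->final(req) --async-->  ahash_def_finup_done2()
 *	    ahash_restore_req()
 */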

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
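
/*
 * Registration sketch (kept out of the build): the minimum a driver
 * typically fills in before calling crypto_register_ahash(). Every
 * "my_*" identifier, the sizes and the priority are illustrative.
 */
#if 0
static struct ahash_alg my_alg = {
	.init	= my_init,
	.update	= my_update,
	.final	= my_final,
	.digest	= my_digest,
	.halg	= {
		.digestsize	= 32,	/* e.g. SHA-256 */
		.statesize	= sizeof(struct my_state),
		.base		= {
			.cra_name	 = "sha256",
			.cra_driver_name = "sha256-mydriver",
			.cra_priority	 = 300,
			.cra_flags	 = CRYPTO_ALG_ASYNC,
			.cra_blocksize	 = 64,	/* SHA-256 block size */
			.cra_ctxsize	 = sizeof(struct my_ctx),
			.cra_module	 = THIS_MODULE,
		},
	},
};
#endif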

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");