v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Asynchronous Cryptographic Hash operations.
  4 *
  5 * This is the asynchronous version of hash.c with notification of
  6 * completion via a callback.
  7 *
  8 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
  9 */
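
A minimal caller-side sketch of the asynchronous API implemented here, using the crypto_wait_req() helper to turn the completion callback into a synchronous wait (the function and buffer names are illustrative; a "sha256" provider is assumed to be available):

	#include <crypto/hash.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	static int example_ahash_digest(void *buf, unsigned int len, u8 *out)
	{
		struct crypto_ahash *tfm;
		struct ahash_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_ahash("sha256", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			crypto_free_ahash(tfm);
			return -ENOMEM;
		}

		sg_init_one(&sg, buf, len);	/* buf must not live on the stack */
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
					   crypto_req_done, &wait);
		ahash_request_set_crypt(req, &sg, out, len);

		/* crypto_wait_req() sleeps on -EINPROGRESS/-EBUSY until the
		 * completion callback reports the final status. */
		err = crypto_wait_req(crypto_ahash_digest(req), &wait);

		ahash_request_free(req);
		crypto_free_ahash(tfm);
		return err;
	}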
 10
 11#include <crypto/internal/hash.h>
 12#include <crypto/scatterwalk.h>
 13#include <linux/bug.h>
 14#include <linux/err.h>
 15#include <linux/kernel.h>
 16#include <linux/module.h>
 17#include <linux/sched.h>
 18#include <linux/slab.h>
 19#include <linux/seq_file.h>
 20#include <linux/cryptouser.h>
 21#include <linux/compiler.h>
 22#include <net/netlink.h>
 23
 24#include "internal.h"
 25
 26static const struct crypto_type crypto_ahash_type;
 27
 28struct ahash_request_priv {
 29	crypto_completion_t complete;
 30	void *data;
 31	u8 *result;
 32	u32 flags;
 33	void *ubuf[] CRYPTO_MINALIGN_ATTR;
 34};
 35
 36static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
 37{
 38	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
 39			    halg);
 40}
 41
 42static int hash_walk_next(struct crypto_hash_walk *walk)
 43{
 44	unsigned int alignmask = walk->alignmask;
 45	unsigned int offset = walk->offset;
 46	unsigned int nbytes = min(walk->entrylen,
 47				  ((unsigned int)(PAGE_SIZE)) - offset);
 48
 49	if (walk->flags & CRYPTO_ALG_ASYNC)
 50		walk->data = kmap(walk->pg);
 51	else
 52		walk->data = kmap_atomic(walk->pg);
 53	walk->data += offset;
 54
 55	if (offset & alignmask) {
 56		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
 57
 58		if (nbytes > unaligned)
 59			nbytes = unaligned;
 60	}
 61
 62	walk->entrylen -= nbytes;
 63	return nbytes;
 64}
 65
 66static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 67{
 68	struct scatterlist *sg;
 69
 70	sg = walk->sg;
 71	walk->offset = sg->offset;
 72	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
 73	walk->offset = offset_in_page(walk->offset);
 74	walk->entrylen = sg->length;
 75
 76	if (walk->entrylen > walk->total)
 77		walk->entrylen = walk->total;
 78	walk->total -= walk->entrylen;
 79
 80	return hash_walk_next(walk);
 81}
 82
 83int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 84{
 85	unsigned int alignmask = walk->alignmask;
 86
 87	walk->data -= walk->offset;
 88
 89	if (walk->entrylen && (walk->offset & alignmask) && !err) {
 90		unsigned int nbytes;
 91
 92		walk->offset = ALIGN(walk->offset, alignmask + 1);
 93		nbytes = min(walk->entrylen,
 94			     (unsigned int)(PAGE_SIZE - walk->offset));
 95		if (nbytes) {
 96			walk->entrylen -= nbytes;
 97			walk->data += walk->offset;
 98			return nbytes;
 99		}
100	}
101
102	if (walk->flags & CRYPTO_ALG_ASYNC)
103		kunmap(walk->pg);
104	else {
105		kunmap_atomic(walk->data);
106		/*
107		 * The may-sleep test only makes sense for sync users.
108		 * Async users don't need to sleep here anyway.
109		 */
110		crypto_yield(walk->flags);
111	}
112
113	if (err)
114		return err;
115
116	if (walk->entrylen) {
117		walk->offset = 0;
118		walk->pg++;
119		return hash_walk_next(walk);
120	}
121
122	if (!walk->total)
123		return 0;
124
125	walk->sg = sg_next(walk->sg);
126
127	return hash_walk_new_entry(walk);
128}
129EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
130
131int crypto_hash_walk_first(struct ahash_request *req,
132			   struct crypto_hash_walk *walk)
133{
134	walk->total = req->nbytes;
135
136	if (!walk->total) {
137		walk->entrylen = 0;
138		return 0;
139	}
140
141	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
142	walk->sg = req->src;
143	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
144
145	return hash_walk_new_entry(walk);
146}
147EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
148
149int crypto_ahash_walk_first(struct ahash_request *req,
150			    struct crypto_hash_walk *walk)
151{
152	walk->total = req->nbytes;
153
154	if (!walk->total) {
155		walk->entrylen = 0;
156		return 0;
157	}
158
159	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
160	walk->sg = req->src;
161	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
162	walk->flags |= CRYPTO_ALG_ASYNC;
163
164	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);
165
166	return hash_walk_new_entry(walk);
167}
168EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
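
Hash implementations consume the two walk entry points above in a standard loop: map a chunk, process it, then let crypto_hash_walk_done() unmap and advance. A sketch of the idiom, where example_process_chunk() is a hypothetical stand-in for a driver's block-processing routine:

	static int example_update(struct ahash_request *req)
	{
		struct crypto_hash_walk walk;
		int nbytes;

		/* walk.data points at the currently mapped chunk */
		for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
		     nbytes = crypto_hash_walk_done(&walk, nbytes))
			nbytes = example_process_chunk(req, walk.data, nbytes);

		return nbytes;	/* 0 on success, negative errno on failure */
	}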
169
170static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
171				unsigned int keylen)
172{
173	unsigned long alignmask = crypto_ahash_alignmask(tfm);
174	int ret;
175	u8 *buffer, *alignbuffer;
176	unsigned long absize;
177
178	absize = keylen + alignmask;
179	buffer = kmalloc(absize, GFP_KERNEL);
180	if (!buffer)
181		return -ENOMEM;
182
183	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
184	memcpy(alignbuffer, key, keylen);
185	ret = tfm->setkey(tfm, alignbuffer, keylen);
186	kfree_sensitive(buffer);
187	return ret;
188}
189
190static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
191			  unsigned int keylen)
192{
193	return -ENOSYS;
194}
195
196static void ahash_set_needkey(struct crypto_ahash *tfm)
197{
198	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
199
200	if (tfm->setkey != ahash_nosetkey &&
201	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
202		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
203}
204
205int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
206			unsigned int keylen)
207{
208	unsigned long alignmask = crypto_ahash_alignmask(tfm);
209	int err;
210
211	if ((unsigned long)key & alignmask)
212		err = ahash_setkey_unaligned(tfm, key, keylen);
213	else
214		err = tfm->setkey(tfm, key, keylen);
215
216	if (unlikely(err)) {
217		ahash_set_needkey(tfm);
218		return err;
219	}
220
221	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
222	return 0;
223}
224EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
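
For keyed algorithms such as HMAC, the key must be set before the first digest operation; otherwise the CRYPTO_TFM_NEED_KEY check in crypto_ahash_digest() below fails with -ENOKEY. A hedged fragment (variable names illustrative):

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;	/* CRYPTO_TFM_NEED_KEY remains set on failure */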
225
226static inline unsigned int ahash_align_buffer_size(unsigned len,
227						   unsigned long mask)
228{
229	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
230}
231
232static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
233{
234	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
235	unsigned long alignmask = crypto_ahash_alignmask(tfm);
236	unsigned int ds = crypto_ahash_digestsize(tfm);
237	struct ahash_request_priv *priv;
238
239	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
240		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
241		       GFP_KERNEL : GFP_ATOMIC);
242	if (!priv)
243		return -ENOMEM;
244
245	/*
246	 * WARNING: Voodoo programming below!
247	 *
248	 * The code below is obscure and hard to understand, thus explanation
249	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
250	 * to understand the layout of structures used here!
251	 *
252	 * The code here will replace portions of the ORIGINAL request with
253	 * pointers to new code and buffers so the hashing operation can store
254	 * the result in an aligned buffer. We will call the modified request
255	 * an ADJUSTED request.
256	 *
257	 * The newly mangled request will look as such:
258	 *
259	 * req {
260	 *   .result        = ADJUSTED[new aligned buffer]
261	 *   .base.complete = ADJUSTED[pointer to completion function]
262	 *   .base.data     = ADJUSTED[*req (pointer to self)]
263	 *   .priv          = ADJUSTED[new priv] {
264	 *           .result   = ORIGINAL(result)
265	 *           .complete = ORIGINAL(base.complete)
266	 *           .data     = ORIGINAL(base.data)
267	 *   }
268	 */
269
270	priv->result = req->result;
271	priv->complete = req->base.complete;
272	priv->data = req->base.data;
273	priv->flags = req->base.flags;
274
275	/*
276	 * WARNING: We do not backup req->priv here! The req->priv
277	 *          is for internal use of the Crypto API and the
278	 *          user must _NOT_ _EVER_ depend on its content!
279	 */
280
281	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
282	req->base.complete = cplt;
283	req->base.data = req;
284	req->priv = priv;
285
286	return 0;
287}
288
289static void ahash_restore_req(struct ahash_request *req, int err)
290{
291	struct ahash_request_priv *priv = req->priv;
292
293	if (!err)
294		memcpy(priv->result, req->result,
295		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
296
297	/* Restore the original crypto request. */
298	req->result = priv->result;
299
300	ahash_request_set_callback(req, priv->flags,
301				   priv->complete, priv->data);
302	req->priv = NULL;
303
304	/* Free the req->priv.priv from the ADJUSTED request. */
305	kfree_sensitive(priv);
306}
307
308static void ahash_notify_einprogress(struct ahash_request *req)
309{
310	struct ahash_request_priv *priv = req->priv;
311	struct crypto_async_request oreq;
312
313	oreq.data = priv->data;
314
315	priv->complete(&oreq, -EINPROGRESS);
316}
317
318static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
319{
320	struct ahash_request *areq = req->data;
321
322	if (err == -EINPROGRESS) {
323		ahash_notify_einprogress(areq);
324		return;
325	}
326
327	/*
328	 * Restore the original request, see ahash_op_unaligned() for what
329	 * goes where.
330	 *
331	 * The "struct ahash_request *req" here is in fact the "req.base"
332	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
333	 * is a pointer to self, it is also the ADJUSTED "req".
334	 */
335
336	/* First copy req->result into req->priv.result */
337	ahash_restore_req(areq, err);
338
339	/* Complete the ORIGINAL request. */
340	areq->base.complete(&areq->base, err);
341}
342
343static int ahash_op_unaligned(struct ahash_request *req,
344			      int (*op)(struct ahash_request *))
345{
346	int err;
347
348	err = ahash_save_req(req, ahash_op_unaligned_done);
349	if (err)
350		return err;
351
352	err = op(req);
353	if (err == -EINPROGRESS || err == -EBUSY)
354		return err;
355
356	ahash_restore_req(req, err);
357
358	return err;
359}
360
361static int crypto_ahash_op(struct ahash_request *req,
362			   int (*op)(struct ahash_request *))
363{
364	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
365	unsigned long alignmask = crypto_ahash_alignmask(tfm);
366
367	if ((unsigned long)req->result & alignmask)
368		return ahash_op_unaligned(req, op);
369
370	return op(req);
371}
372
373int crypto_ahash_final(struct ahash_request *req)
374{
375	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
376	struct crypto_alg *alg = tfm->base.__crt_alg;
377	unsigned int nbytes = req->nbytes;
378	int ret;
379
380	crypto_stats_get(alg);
381	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
382	crypto_stats_ahash_final(nbytes, ret, alg);
383	return ret;
384}
385EXPORT_SYMBOL_GPL(crypto_ahash_final);
386
387int crypto_ahash_finup(struct ahash_request *req)
388{
389	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
390	struct crypto_alg *alg = tfm->base.__crt_alg;
391	unsigned int nbytes = req->nbytes;
392	int ret;
393
394	crypto_stats_get(alg);
395	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
396	crypto_stats_ahash_final(nbytes, ret, alg);
397	return ret;
398}
399EXPORT_SYMBOL_GPL(crypto_ahash_finup);
400
401int crypto_ahash_digest(struct ahash_request *req)
402{
403	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
404	struct crypto_alg *alg = tfm->base.__crt_alg;
405	unsigned int nbytes = req->nbytes;
406	int ret;
407
408	crypto_stats_get(alg);
409	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
410		ret = -ENOKEY;
411	else
412		ret = crypto_ahash_op(req, tfm->digest);
413	crypto_stats_ahash_final(nbytes, ret, alg);
414	return ret;
415}
416EXPORT_SYMBOL_GPL(crypto_ahash_digest);
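
Besides the one-shot digest above, callers can stream data through the init/update/final triple; each step may return -EINPROGRESS or -EBUSY and can be funneled through crypto_wait_req() as in the earlier sketch. An illustrative fragment (sg, out, wait, and chunk_len are assumed to be set up by the caller):

	err = crypto_wait_req(crypto_ahash_init(req), &wait);
	if (err)
		goto out;

	ahash_request_set_crypt(req, sg, NULL, chunk_len);
	err = crypto_wait_req(crypto_ahash_update(req), &wait);
	if (err)
		goto out;

	ahash_request_set_crypt(req, NULL, out, 0);	/* result buffer for final */
	err = crypto_wait_req(crypto_ahash_final(req), &wait);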
417
418static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
419{
420	struct ahash_request *areq = req->data;
421
422	if (err == -EINPROGRESS)
423		return;
424
425	ahash_restore_req(areq, err);
426
427	areq->base.complete(&areq->base, err);
428}
429
430static int ahash_def_finup_finish1(struct ahash_request *req, int err)
431{
432	if (err)
433		goto out;
434
435	req->base.complete = ahash_def_finup_done2;
436
437	err = crypto_ahash_reqtfm(req)->final(req);
438	if (err == -EINPROGRESS || err == -EBUSY)
439		return err;
440
441out:
442	ahash_restore_req(req, err);
443	return err;
444}
445
446static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
447{
448	struct ahash_request *areq = req->data;
449
450	if (err == -EINPROGRESS) {
451		ahash_notify_einprogress(areq);
452		return;
453	}
454
455	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
456
457	err = ahash_def_finup_finish1(areq, err);
458	if (areq->priv)
459		return;
460
461	areq->base.complete(&areq->base, err);
462}
463
464static int ahash_def_finup(struct ahash_request *req)
465{
466	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
467	int err;
468
469	err = ahash_save_req(req, ahash_def_finup_done1);
470	if (err)
471		return err;
472
473	err = tfm->update(req);
474	if (err == -EINPROGRESS || err == -EBUSY)
475		return err;
476
477	return ahash_def_finup_finish1(req, err);
478}
479
480static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
481{
482	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
483	struct ahash_alg *alg = crypto_ahash_alg(hash);
484
485	hash->setkey = ahash_nosetkey;
486
487	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
488		return crypto_init_shash_ops_async(tfm);
489
490	hash->init = alg->init;
491	hash->update = alg->update;
492	hash->final = alg->final;
493	hash->finup = alg->finup ?: ahash_def_finup;
494	hash->digest = alg->digest;
495	hash->export = alg->export;
496	hash->import = alg->import;
497
498	if (alg->setkey) {
499		hash->setkey = alg->setkey;
500		ahash_set_needkey(hash);
501	}
502
503	return 0;
504}
505
506static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
507{
508	if (alg->cra_type != &crypto_ahash_type)
509		return sizeof(struct crypto_shash *);
510
511	return crypto_alg_extsize(alg);
512}
513
514static void crypto_ahash_free_instance(struct crypto_instance *inst)
515{
516	struct ahash_instance *ahash = ahash_instance(inst);
517
518	ahash->free(ahash);
519}
520
521#ifdef CONFIG_NET
522static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
523{
524	struct crypto_report_hash rhash;
525
526	memset(&rhash, 0, sizeof(rhash));
527
528	strscpy(rhash.type, "ahash", sizeof(rhash.type));
529
530	rhash.blocksize = alg->cra_blocksize;
531	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
532
533	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
534}
535#else
536static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
537{
538	return -ENOSYS;
539}
540#endif
541
542static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
543	__maybe_unused;
544static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
545{
546	seq_printf(m, "type         : ahash\n");
547	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
548					     "yes" : "no");
549	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
550	seq_printf(m, "digestsize   : %u\n",
551		   __crypto_hash_alg_common(alg)->digestsize);
552}
553
554static const struct crypto_type crypto_ahash_type = {
555	.extsize = crypto_ahash_extsize,
556	.init_tfm = crypto_ahash_init_tfm,
557	.free = crypto_ahash_free_instance,
558#ifdef CONFIG_PROC_FS
559	.show = crypto_ahash_show,
560#endif
561	.report = crypto_ahash_report,
562	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
563	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
564	.type = CRYPTO_ALG_TYPE_AHASH,
565	.tfmsize = offsetof(struct crypto_ahash, base),
566};
567
568int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
569		      struct crypto_instance *inst,
570		      const char *name, u32 type, u32 mask)
571{
572	spawn->base.frontend = &crypto_ahash_type;
573	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
574}
575EXPORT_SYMBOL_GPL(crypto_grab_ahash);
576
577struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
578					u32 mask)
579{
580	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
581}
582EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
583
584int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
585{
586	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
587}
588EXPORT_SYMBOL_GPL(crypto_has_ahash);
589
590static int ahash_prepare_alg(struct ahash_alg *alg)
591{
592	struct crypto_alg *base = &alg->halg.base;
593
594	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
595	    alg->halg.statesize > HASH_MAX_STATESIZE ||
596	    alg->halg.statesize == 0)
597		return -EINVAL;
598
599	base->cra_type = &crypto_ahash_type;
600	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
601	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
602
603	return 0;
604}
605
606int crypto_register_ahash(struct ahash_alg *alg)
607{
608	struct crypto_alg *base = &alg->halg.base;
609	int err;
610
611	err = ahash_prepare_alg(alg);
612	if (err)
613		return err;
614
615	return crypto_register_alg(base);
616}
617EXPORT_SYMBOL_GPL(crypto_register_ahash);
618
619void crypto_unregister_ahash(struct ahash_alg *alg)
620{
621	crypto_unregister_alg(&alg->halg.base);
622}
623EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
624
625int crypto_register_ahashes(struct ahash_alg *algs, int count)
626{
627	int i, ret;
628
629	for (i = 0; i < count; i++) {
630		ret = crypto_register_ahash(&algs[i]);
631		if (ret)
632			goto err;
633	}
634
635	return 0;
636
637err:
638	for (--i; i >= 0; --i)
639		crypto_unregister_ahash(&algs[i]);
640
641	return ret;
642}
643EXPORT_SYMBOL_GPL(crypto_register_ahashes);
644
645void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
646{
647	int i;
648
649	for (i = count - 1; i >= 0; --i)
650		crypto_unregister_ahash(&algs[i]);
651}
652EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
653
654int ahash_register_instance(struct crypto_template *tmpl,
655			    struct ahash_instance *inst)
656{
657	int err;
658
659	if (WARN_ON(!inst->free))
660		return -EINVAL;
661
662	err = ahash_prepare_alg(&inst->alg);
663	if (err)
664		return err;
665
666	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
667}
668EXPORT_SYMBOL_GPL(ahash_register_instance);
669
670bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
671{
672	struct crypto_alg *alg = &halg->base;
673
674	if (alg->cra_type != &crypto_ahash_type)
675		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
676
677	return __crypto_ahash_alg(alg)->setkey != NULL;
678}
679EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
680
681MODULE_LICENSE("GPL");
682MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Asynchronous Cryptographic Hash operations.
  4 *
  5 * This is the implementation of the ahash (asynchronous hash) API.  It differs
  6 * from shash (synchronous hash) in that ahash supports asynchronous operations,
  7 * and it hashes data from scatterlists instead of virtually addressed buffers.
  8 *
  9 * The ahash API provides access to both ahash and shash algorithms.  The shash
 10 * API only provides access to shash algorithms.
 11 *
 12 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 13 */
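
Nothing changes for callers when the winning implementation is an shash: crypto_alloc_ahash() transparently wraps it (see crypto_init_ahash_using_shash() below). Callers who specifically need a synchronous implementation can mask out asynchronous ones; a conventional sketch:

	/* Accept whichever implementation wins, ahash or wrapped shash: */
	tfm = crypto_alloc_ahash("sha256", 0, 0);

	/* Restrict the search to synchronous implementations only: */
	tfm = crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ASYNC);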
 14
 15#include <crypto/scatterwalk.h>
 16#include <linux/cryptouser.h>
 17#include <linux/err.h>
 18#include <linux/kernel.h>
 19#include <linux/module.h>
 20#include <linux/sched.h>
 21#include <linux/slab.h>
 22#include <linux/seq_file.h>
 23#include <linux/string.h>
 24#include <net/netlink.h>
 25
 26#include "hash.h"
 27
 28#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
 29
 30static inline struct crypto_istat_hash *ahash_get_stat(struct ahash_alg *alg)
 31{
 32	return hash_get_stat(&alg->halg);
 33}
 34
 35static inline int crypto_ahash_errstat(struct ahash_alg *alg, int err)
 36{
 37	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
 38		return err;
 39
 40	if (err && err != -EINPROGRESS && err != -EBUSY)
 41		atomic64_inc(&ahash_get_stat(alg)->err_cnt);
 42
 43	return err;
 44}
 45
 46/*
 47 * For an ahash tfm that is using an shash algorithm (instead of an ahash
 48 * algorithm), this returns the underlying shash tfm.
 49 */
 50static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm)
 51{
 52	return *(struct crypto_shash **)crypto_ahash_ctx(tfm);
 53}
 54
 55static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,
 56						    struct crypto_ahash *tfm)
 57{
 58	struct shash_desc *desc = ahash_request_ctx(req);
 59
 60	desc->tfm = ahash_to_shash(tfm);
 61	return desc;
 62}
 63
 64int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
 65{
 66	struct crypto_hash_walk walk;
 67	int nbytes;
 68
 69	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 70	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 71		nbytes = crypto_shash_update(desc, walk.data, nbytes);
 72
 73	return nbytes;
 74}
 75EXPORT_SYMBOL_GPL(shash_ahash_update);
 76
 77int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
 78{
 79	struct crypto_hash_walk walk;
 80	int nbytes;
 81
 82	nbytes = crypto_hash_walk_first(req, &walk);
 83	if (!nbytes)
 84		return crypto_shash_final(desc, req->result);
 85
 86	do {
 87		nbytes = crypto_hash_walk_last(&walk) ?
 88			 crypto_shash_finup(desc, walk.data, nbytes,
 89					    req->result) :
 90			 crypto_shash_update(desc, walk.data, nbytes);
 91		nbytes = crypto_hash_walk_done(&walk, nbytes);
 92	} while (nbytes > 0);
 93
 94	return nbytes;
 95}
 96EXPORT_SYMBOL_GPL(shash_ahash_finup);
 97
 98int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 99{
100	unsigned int nbytes = req->nbytes;
101	struct scatterlist *sg;
102	unsigned int offset;
103	int err;
104
105	if (nbytes &&
106	    (sg = req->src, offset = sg->offset,
107	     nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
108		void *data;
109
110		data = kmap_local_page(sg_page(sg));
111		err = crypto_shash_digest(desc, data + offset, nbytes,
112					  req->result);
113		kunmap_local(data);
114	} else
115		err = crypto_shash_init(desc) ?:
116		      shash_ahash_finup(req, desc);
117
118	return err;
119}
120EXPORT_SYMBOL_GPL(shash_ahash_digest);
121
122static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm)
123{
124	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
125
126	crypto_free_shash(*ctx);
127}
128
129static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm)
130{
131	struct crypto_alg *calg = tfm->__crt_alg;
132	struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
133	struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
134	struct crypto_shash *shash;
135
136	if (!crypto_mod_get(calg))
137		return -EAGAIN;
138
139	shash = crypto_create_tfm(calg, &crypto_shash_type);
140	if (IS_ERR(shash)) {
141		crypto_mod_put(calg);
142		return PTR_ERR(shash);
143	}
144
145	crt->using_shash = true;
146	*ctx = shash;
147	tfm->exit = crypto_exit_ahash_using_shash;
148
149	crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
150				    CRYPTO_TFM_NEED_KEY);
151	crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
152
153	return 0;
154}
155
156static int hash_walk_next(struct crypto_hash_walk *walk)
157{
158	unsigned int offset = walk->offset;
159	unsigned int nbytes = min(walk->entrylen,
160				  ((unsigned int)(PAGE_SIZE)) - offset);
161
162	walk->data = kmap_local_page(walk->pg);
163	walk->data += offset;
164	walk->entrylen -= nbytes;
165	return nbytes;
166}
167
168static int hash_walk_new_entry(struct crypto_hash_walk *walk)
169{
170	struct scatterlist *sg;
171
172	sg = walk->sg;
173	walk->offset = sg->offset;
174	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
175	walk->offset = offset_in_page(walk->offset);
176	walk->entrylen = sg->length;
177
178	if (walk->entrylen > walk->total)
179		walk->entrylen = walk->total;
180	walk->total -= walk->entrylen;
181
182	return hash_walk_next(walk);
183}
184
185int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
186{
187	walk->data -= walk->offset;
188
189	kunmap_local(walk->data);
190	crypto_yield(walk->flags);
191
192	if (err)
193		return err;
194
195	if (walk->entrylen) {
196		walk->offset = 0;
197		walk->pg++;
198		return hash_walk_next(walk);
199	}
200
201	if (!walk->total)
202		return 0;
203
204	walk->sg = sg_next(walk->sg);
205
206	return hash_walk_new_entry(walk);
207}
208EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
209
210int crypto_hash_walk_first(struct ahash_request *req,
211			   struct crypto_hash_walk *walk)
212{
213	walk->total = req->nbytes;
214
215	if (!walk->total) {
216		walk->entrylen = 0;
217		return 0;
218	}
219
220	walk->sg = req->src;
221	walk->flags = req->base.flags;
222
223	return hash_walk_new_entry(walk);
224}
225EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
226
227static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
228			  unsigned int keylen)
229{
230	return -ENOSYS;
231}
232
233static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg)
234{
235	if (alg->setkey != ahash_nosetkey &&
236	    !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
237		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
238}
239
240int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
241			unsigned int keylen)
242{
243	if (likely(tfm->using_shash)) {
244		struct crypto_shash *shash = ahash_to_shash(tfm);
245		int err;
246
247		err = crypto_shash_setkey(shash, key, keylen);
248		if (unlikely(err)) {
249			crypto_ahash_set_flags(tfm,
250					       crypto_shash_get_flags(shash) &
251					       CRYPTO_TFM_NEED_KEY);
252			return err;
253		}
254	} else {
255		struct ahash_alg *alg = crypto_ahash_alg(tfm);
256		int err;
257
258		err = alg->setkey(tfm, key, keylen);
259		if (unlikely(err)) {
260			ahash_set_needkey(tfm, alg);
261			return err;
262		}
263	}
264	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
265	return 0;
266}
267EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
268
269int crypto_ahash_init(struct ahash_request *req)
270{
271	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
272
273	if (likely(tfm->using_shash))
274		return crypto_shash_init(prepare_shash_desc(req, tfm));
275	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
276		return -ENOKEY;
277	return crypto_ahash_alg(tfm)->init(req);
278}
279EXPORT_SYMBOL_GPL(crypto_ahash_init);
280
281static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
282			  bool has_state)
283{
284	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
285	unsigned int ds = crypto_ahash_digestsize(tfm);
286	struct ahash_request *subreq;
287	unsigned int subreq_size;
288	unsigned int reqsize;
289	u8 *result;
290	gfp_t gfp;
291	u32 flags;
292
293	subreq_size = sizeof(*subreq);
294	reqsize = crypto_ahash_reqsize(tfm);
295	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
296	subreq_size += reqsize;
297	subreq_size += ds;
298
299	flags = ahash_request_flags(req);
300	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?  GFP_KERNEL : GFP_ATOMIC;
301	subreq = kmalloc(subreq_size, gfp);
302	if (!subreq)
303		return -ENOMEM;
304
305	ahash_request_set_tfm(subreq, tfm);
306	ahash_request_set_callback(subreq, flags, cplt, req);
307
308	result = (u8 *)(subreq + 1) + reqsize;
309
310	ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
311
312	if (has_state) {
313		void *state;
314
315		state = kmalloc(crypto_ahash_statesize(tfm), gfp);
316		if (!state) {
317			kfree(subreq);
318			return -ENOMEM;
319		}
320
321		crypto_ahash_export(req, state);
322		crypto_ahash_import(subreq, state);
323		kfree_sensitive(state);
324	}
325
326	req->priv = subreq;
327
328	return 0;
329}
330
331static void ahash_restore_req(struct ahash_request *req, int err)
332{
333	struct ahash_request *subreq = req->priv;
334
335	if (!err)
336		memcpy(req->result, subreq->result,
337		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
338
339	req->priv = NULL;
340
341	kfree_sensitive(subreq);
342}
343
344int crypto_ahash_update(struct ahash_request *req)
345{
346	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
347	struct ahash_alg *alg;
348
349	if (likely(tfm->using_shash))
350		return shash_ahash_update(req, ahash_request_ctx(req));
351
352	alg = crypto_ahash_alg(tfm);
353	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
354		atomic64_add(req->nbytes, &ahash_get_stat(alg)->hash_tlen);
355	return crypto_ahash_errstat(alg, alg->update(req));
356}
357EXPORT_SYMBOL_GPL(crypto_ahash_update);
358
359int crypto_ahash_final(struct ahash_request *req)
360{
361	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
362	struct ahash_alg *alg;
363
364	if (likely(tfm->using_shash))
365		return crypto_shash_final(ahash_request_ctx(req), req->result);
366
367	alg = crypto_ahash_alg(tfm);
368	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
369		atomic64_inc(&ahash_get_stat(alg)->hash_cnt);
370	return crypto_ahash_errstat(alg, alg->final(req));
371}
372EXPORT_SYMBOL_GPL(crypto_ahash_final);
373
374int crypto_ahash_finup(struct ahash_request *req)
375{
376	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
377	struct ahash_alg *alg;
378
379	if (likely(tfm->using_shash))
380		return shash_ahash_finup(req, ahash_request_ctx(req));
381
382	alg = crypto_ahash_alg(tfm);
383	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
384		struct crypto_istat_hash *istat = ahash_get_stat(alg);
385
386		atomic64_inc(&istat->hash_cnt);
387		atomic64_add(req->nbytes, &istat->hash_tlen);
388	}
389	return crypto_ahash_errstat(alg, alg->finup(req));
390}
391EXPORT_SYMBOL_GPL(crypto_ahash_finup);
392
393int crypto_ahash_digest(struct ahash_request *req)
394{
395	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
396	struct ahash_alg *alg;
397	int err;
398
399	if (likely(tfm->using_shash))
400		return shash_ahash_digest(req, prepare_shash_desc(req, tfm));
401
402	alg = crypto_ahash_alg(tfm);
403	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
404		struct crypto_istat_hash *istat = ahash_get_stat(alg);
405
406		atomic64_inc(&istat->hash_cnt);
407		atomic64_add(req->nbytes, &istat->hash_tlen);
408	}
409
410	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
411		err = -ENOKEY;
412	else
413		err = alg->digest(req);
414
415	return crypto_ahash_errstat(alg, err);
416}
417EXPORT_SYMBOL_GPL(crypto_ahash_digest);
418
419static void ahash_def_finup_done2(void *data, int err)
420{
421	struct ahash_request *areq = data;
422
423	if (err == -EINPROGRESS)
424		return;
425
426	ahash_restore_req(areq, err);
427
428	ahash_request_complete(areq, err);
429}
430
431static int ahash_def_finup_finish1(struct ahash_request *req, int err)
432{
433	struct ahash_request *subreq = req->priv;
434
435	if (err)
436		goto out;
437
438	subreq->base.complete = ahash_def_finup_done2;
439
440	err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq);
441	if (err == -EINPROGRESS || err == -EBUSY)
442		return err;
443
444out:
445	ahash_restore_req(req, err);
446	return err;
447}
448
449static void ahash_def_finup_done1(void *data, int err)
450{
451	struct ahash_request *areq = data;
452	struct ahash_request *subreq;
453
454	if (err == -EINPROGRESS)
455		goto out;
456
457	subreq = areq->priv;
458	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
459
460	err = ahash_def_finup_finish1(areq, err);
461	if (err == -EINPROGRESS || err == -EBUSY)
462		return;
463
464out:
465	ahash_request_complete(areq, err);
466}
467
468static int ahash_def_finup(struct ahash_request *req)
469{
470	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
471	int err;
472
473	err = ahash_save_req(req, ahash_def_finup_done1, true);
474	if (err)
475		return err;
476
477	err = crypto_ahash_alg(tfm)->update(req->priv);
478	if (err == -EINPROGRESS || err == -EBUSY)
479		return err;
480
481	return ahash_def_finup_finish1(req, err);
482}
483
484int crypto_ahash_export(struct ahash_request *req, void *out)
485{
486	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
487
488	if (likely(tfm->using_shash))
489		return crypto_shash_export(ahash_request_ctx(req), out);
490	return crypto_ahash_alg(tfm)->export(req, out);
491}
492EXPORT_SYMBOL_GPL(crypto_ahash_export);
493
494int crypto_ahash_import(struct ahash_request *req, const void *in)
495{
496	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
497
498	if (likely(tfm->using_shash))
499		return crypto_shash_import(prepare_shash_desc(req, tfm), in);
500	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
501		return -ENOKEY;
502	return crypto_ahash_alg(tfm)->import(req, in);
503}
504EXPORT_SYMBOL_GPL(crypto_ahash_import);
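
export/import let a caller serialize partial hash state and resume it later, possibly on a different request; ahash_save_req() above relies on the same pair. A minimal sketch, sizing the buffer with HASH_MAX_STATESIZE for simplicity (a crypto_ahash_statesize()-sized kmalloc also works; both requests must use the same algorithm, and error handling is elided):

	u8 state[HASH_MAX_STATESIZE];

	/* ... after some crypto_ahash_update() calls on req1 ... */
	err = crypto_ahash_export(req1, state);
	if (!err)
		err = crypto_ahash_import(req2, state);
	/* req2 now continues hashing from req1's saved midpoint */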
505
506static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
507{
508	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
509	struct ahash_alg *alg = crypto_ahash_alg(hash);
510
511	alg->exit_tfm(hash);
512}
513
514static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
515{
516	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
517	struct ahash_alg *alg = crypto_ahash_alg(hash);
518
519	crypto_ahash_set_statesize(hash, alg->halg.statesize);
520
521	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
522		return crypto_init_ahash_using_shash(tfm);
523
524	ahash_set_needkey(hash, alg);
525
526	if (alg->exit_tfm)
527		tfm->exit = crypto_ahash_exit_tfm;
528
529	return alg->init_tfm ? alg->init_tfm(hash) : 0;
530}
531
532static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
533{
534	if (alg->cra_type == &crypto_shash_type)
535		return sizeof(struct crypto_shash *);
536
537	return crypto_alg_extsize(alg);
538}
539
540static void crypto_ahash_free_instance(struct crypto_instance *inst)
541{
542	struct ahash_instance *ahash = ahash_instance(inst);
543
544	ahash->free(ahash);
545}
546
547static int __maybe_unused crypto_ahash_report(
548	struct sk_buff *skb, struct crypto_alg *alg)
549{
550	struct crypto_report_hash rhash;
551
552	memset(&rhash, 0, sizeof(rhash));
553
554	strscpy(rhash.type, "ahash", sizeof(rhash.type));
555
556	rhash.blocksize = alg->cra_blocksize;
557	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
558
559	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
560}
561
562static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
563	__maybe_unused;
564static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
565{
566	seq_printf(m, "type         : ahash\n");
567	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
568					     "yes" : "no");
569	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
570	seq_printf(m, "digestsize   : %u\n",
571		   __crypto_hash_alg_common(alg)->digestsize);
572}
573
574static int __maybe_unused crypto_ahash_report_stat(
575	struct sk_buff *skb, struct crypto_alg *alg)
576{
577	return crypto_hash_report_stat(skb, alg, "ahash");
578}
579
580static const struct crypto_type crypto_ahash_type = {
581	.extsize = crypto_ahash_extsize,
582	.init_tfm = crypto_ahash_init_tfm,
583	.free = crypto_ahash_free_instance,
584#ifdef CONFIG_PROC_FS
585	.show = crypto_ahash_show,
586#endif
587#if IS_ENABLED(CONFIG_CRYPTO_USER)
588	.report = crypto_ahash_report,
589#endif
590#ifdef CONFIG_CRYPTO_STATS
591	.report_stat = crypto_ahash_report_stat,
592#endif
593	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
594	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
595	.type = CRYPTO_ALG_TYPE_AHASH,
596	.tfmsize = offsetof(struct crypto_ahash, base),
597};
598
599int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
600		      struct crypto_instance *inst,
601		      const char *name, u32 type, u32 mask)
602{
603	spawn->base.frontend = &crypto_ahash_type;
604	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
605}
606EXPORT_SYMBOL_GPL(crypto_grab_ahash);
607
608struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
609					u32 mask)
610{
611	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
612}
613EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
614
615int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
616{
617	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
618}
619EXPORT_SYMBOL_GPL(crypto_has_ahash);
620
621struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
622{
623	struct hash_alg_common *halg = crypto_hash_alg_common(hash);
624	struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
625	struct crypto_ahash *nhash;
626	struct ahash_alg *alg;
627	int err;
628
629	if (!crypto_hash_alg_has_setkey(halg)) {
630		tfm = crypto_tfm_get(tfm);
631		if (IS_ERR(tfm))
632			return ERR_CAST(tfm);
633
634		return hash;
635	}
636
637	nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);
638
639	if (IS_ERR(nhash))
640		return nhash;
641
642	nhash->reqsize = hash->reqsize;
643	nhash->statesize = hash->statesize;
644
645	if (likely(hash->using_shash)) {
646		struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
647		struct crypto_shash *shash;
648
649		shash = crypto_clone_shash(ahash_to_shash(hash));
650		if (IS_ERR(shash)) {
651			err = PTR_ERR(shash);
652			goto out_free_nhash;
653		}
654		nhash->using_shash = true;
655		*nctx = shash;
656		return nhash;
657	}
658
659	err = -ENOSYS;
660	alg = crypto_ahash_alg(hash);
661	if (!alg->clone_tfm)
662		goto out_free_nhash;
663
664	err = alg->clone_tfm(nhash, hash);
665	if (err)
666		goto out_free_nhash;
667
668	return nhash;
669
670out_free_nhash:
671	crypto_free_ahash(nhash);
672	return ERR_PTR(err);
673}
674EXPORT_SYMBOL_GPL(crypto_clone_ahash);
675
676static int ahash_prepare_alg(struct ahash_alg *alg)
677{
678	struct crypto_alg *base = &alg->halg.base;
679	int err;
680
681	if (alg->halg.statesize == 0)
682		return -EINVAL;
683
684	err = hash_prepare_alg(&alg->halg);
685	if (err)
686		return err;
687
688	base->cra_type = &crypto_ahash_type;
689	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
690
691	if (!alg->finup)
692		alg->finup = ahash_def_finup;
693	if (!alg->setkey)
694		alg->setkey = ahash_nosetkey;
695
696	return 0;
697}
698
699int crypto_register_ahash(struct ahash_alg *alg)
700{
701	struct crypto_alg *base = &alg->halg.base;
702	int err;
703
704	err = ahash_prepare_alg(alg);
705	if (err)
706		return err;
707
708	return crypto_register_alg(base);
709}
710EXPORT_SYMBOL_GPL(crypto_register_ahash);
711
712void crypto_unregister_ahash(struct ahash_alg *alg)
713{
714	crypto_unregister_alg(&alg->halg.base);
715}
716EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
717
718int crypto_register_ahashes(struct ahash_alg *algs, int count)
719{
720	int i, ret;
721
722	for (i = 0; i < count; i++) {
723		ret = crypto_register_ahash(&algs[i]);
724		if (ret)
725			goto err;
726	}
727
728	return 0;
729
730err:
731	for (--i; i >= 0; --i)
732		crypto_unregister_ahash(&algs[i]);
733
734	return ret;
735}
736EXPORT_SYMBOL_GPL(crypto_register_ahashes);
737
738void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
739{
740	int i;
741
742	for (i = count - 1; i >= 0; --i)
743		crypto_unregister_ahash(&algs[i]);
744}
745EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
746
747int ahash_register_instance(struct crypto_template *tmpl,
748			    struct ahash_instance *inst)
749{
750	int err;
751
752	if (WARN_ON(!inst->free))
753		return -EINVAL;
754
755	err = ahash_prepare_alg(&inst->alg);
756	if (err)
757		return err;
758
759	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
760}
761EXPORT_SYMBOL_GPL(ahash_register_instance);
762
763bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
764{
765	struct crypto_alg *alg = &halg->base;
766
767	if (alg->cra_type == &crypto_shash_type)
768		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
769
770	return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
771}
772EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
773
774MODULE_LICENSE("GPL");
775MODULE_DESCRIPTION("Asynchronous cryptographic hash type");