crypto/ahash.c (Linux v4.6)

/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
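
The walk entry points above, together with crypto_hash_walk_done(), are what hash drivers use to iterate over a request's scatterlist one mapped, alignment-trimmed chunk at a time. A minimal sketch of that loop follows; example_consume() is a hypothetical stand-in for a driver's block-processing routine, not part of ahash.c:

static int example_walk_request(struct ahash_request *req)
{
	struct crypto_hash_walk walk;
	int nbytes;

	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0)) {
		/* walk.data is a mapped pointer valid for nbytes bytes. */
		example_consume(walk.data, nbytes);
	}

	/* 0 when the scatterlist is exhausted, negative on error. */
	return nbytes;
}
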
static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
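
crypto_ahash_final/finup/digest() above are the user-facing entry points, and callers must be prepared for -EINPROGRESS or -EBUSY from the asynchronous path. A minimal sketch of the completion-based calling pattern common in this kernel era follows; the example_* names are illustrative, and the source buffer must be reachable through a scatterlist (e.g. kmalloc'd, not on the stack):

struct example_wait {
	struct completion done;
	int err;
};

static void example_complete(struct crypto_async_request *req, int err)
{
	struct example_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;
	wait->err = err;
	complete(&wait->done);
}

static int example_digest(const u8 *buf, unsigned int len, u8 *out)
{
	struct example_wait wait = { .err = 0 };
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	init_completion(&wait.done);

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_complete, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&wait.done);
		err = wait.err;
	}

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return err;
}
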
static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");

crypto/ahash.c (Linux v5.14.15)

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

static const struct crypto_type crypto_ahash_type;

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
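
The NEED_KEY handling above means a keyed transform (an HMAC, for instance) refuses digest operations until a setkey succeeds. A small hedged sketch of caller-side usage; example_key_hmac() and its parameters are illustrative, not part of ahash.c:

static int example_key_hmac(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	int err;

	/* On failure ahash_set_needkey() re-arms CRYPTO_TFM_NEED_KEY,
	 * and crypto_ahash_digest() keeps returning -ENOKEY. */
	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err)
		return err;

	/* Key accepted: NEED_KEY is cleared, digests may proceed. */
	return 0;
}
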
static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kfree_sensitive(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
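
By this kernel version the crypto_wait helpers exist, so a caller who wants synchronous behaviour no longer needs a hand-rolled completion. A minimal sketch, assuming the caller names below (example_sha256(), buf, out) which are illustrative; buf must be addressable through a scatterlist (e.g. kmalloc'd, not on the stack):

static int example_sha256(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* crypto_wait_req() turns -EINPROGRESS/-EBUSY into a sleep. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return err;
}
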
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);
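
For completeness, here is the minimal shape of an ahash_alg that a driver might feed into crypto_register_ahash() above. This is a hedged sketch only: the example_* callbacks, context/state structs, and the numeric values are placeholders, and a real driver would also supply .export/.import and tune the cra_* fields:

static struct ahash_alg example_alg = {
	.init	= example_init,		/* hypothetical driver callbacks */
	.update	= example_update,
	.final	= example_final,
	.digest	= example_digest,
	.halg = {
		.digestsize = 32,	/* e.g. SHA-256 */
		.statesize  = sizeof(struct example_state),
		.base = {
			.cra_name	 = "sha256",
			.cra_driver_name = "sha256-example",
			.cra_priority	 = 300,
			.cra_flags	 = CRYPTO_ALG_ASYNC,
			.cra_blocksize	 = 64,
			.cra_ctxsize	 = sizeof(struct example_ctx),
			.cra_module	 = THIS_MODULE,
		},
	},
};

static int __init example_mod_init(void)
{
	/* ahash_prepare_alg() above enforces the digestsize/statesize
	 * limits and forces the AHASH type bits before registration. */
	return crypto_register_ahash(&example_alg);
}

static void __exit example_mod_exit(void)
{
	crypto_unregister_ahash(&example_alg);
}
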
void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");