v4.6: crypto/ahash.c
  1/*
  2 * Asynchronous Cryptographic Hash operations.
  3 *
  4 * This is the asynchronous version of hash.c with notification of
  5 * completion via a callback.
  6 *
  7 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
  8 *
  9 * This program is free software; you can redistribute it and/or modify it
 10 * under the terms of the GNU General Public License as published by the Free
 11 * Software Foundation; either version 2 of the License, or (at your option)
 12 * any later version.
 13 *
 14 */
 15
 16#include <crypto/internal/hash.h>
 17#include <crypto/scatterwalk.h>
 18#include <linux/bug.h>
 19#include <linux/err.h>
 20#include <linux/kernel.h>
 21#include <linux/module.h>
 22#include <linux/sched.h>
 23#include <linux/slab.h>
 24#include <linux/seq_file.h>
 25#include <linux/cryptouser.h>
 26#include <net/netlink.h>
 27
 28#include "internal.h"
 29
 30struct ahash_request_priv {
 31	crypto_completion_t complete;
 32	void *data;
 33	u8 *result;
 34	void *ubuf[] CRYPTO_MINALIGN_ATTR;
 35};
 36
 37static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
 38{
 39	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
 40			    halg);
 41}
 42
 43static int hash_walk_next(struct crypto_hash_walk *walk)
 44{
 45	unsigned int alignmask = walk->alignmask;
 46	unsigned int offset = walk->offset;
 47	unsigned int nbytes = min(walk->entrylen,
 48				  ((unsigned int)(PAGE_SIZE)) - offset);
 49
 50	if (walk->flags & CRYPTO_ALG_ASYNC)
 51		walk->data = kmap(walk->pg);
 52	else
 53		walk->data = kmap_atomic(walk->pg);
 54	walk->data += offset;
 55
 56	if (offset & alignmask) {
 57		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
 58
 59		if (nbytes > unaligned)
 60			nbytes = unaligned;
 61	}
 62
 63	walk->entrylen -= nbytes;
 64	return nbytes;
 65}
 66
 67static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 68{
 69	struct scatterlist *sg;
 70
 71	sg = walk->sg;
 72	walk->offset = sg->offset;
 73	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
 74	walk->offset = offset_in_page(walk->offset);
 75	walk->entrylen = sg->length;
 76
 77	if (walk->entrylen > walk->total)
 78		walk->entrylen = walk->total;
 79	walk->total -= walk->entrylen;
 80
 81	return hash_walk_next(walk);
 82}
 83
 84int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 85{
 86	unsigned int alignmask = walk->alignmask;
 87	unsigned int nbytes = walk->entrylen;
 88
 89	walk->data -= walk->offset;
 90
 91	if (nbytes && walk->offset & alignmask && !err) {
 92		walk->offset = ALIGN(walk->offset, alignmask + 1);
 93		walk->data += walk->offset;
 94
 95		nbytes = min(nbytes,
 96			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
 97		walk->entrylen -= nbytes;
 98
 99		return nbytes;
100	}
101
102	if (walk->flags & CRYPTO_ALG_ASYNC)
103		kunmap(walk->pg);
104	else {
105		kunmap_atomic(walk->data);
106		/*
 107		 * The may-sleep test only makes sense for sync users.
108		 * Async users don't need to sleep here anyway.
109		 */
110		crypto_yield(walk->flags);
111	}
112
113	if (err)
114		return err;
115
116	if (nbytes) {
117		walk->offset = 0;
118		walk->pg++;
119		return hash_walk_next(walk);
120	}
121
122	if (!walk->total)
123		return 0;
124
125	walk->sg = sg_next(walk->sg);
126
127	return hash_walk_new_entry(walk);
128}
129EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
130
131int crypto_hash_walk_first(struct ahash_request *req,
132			   struct crypto_hash_walk *walk)
133{
134	walk->total = req->nbytes;
135
136	if (!walk->total) {
137		walk->entrylen = 0;
138		return 0;
139	}
140
141	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
142	walk->sg = req->src;
143	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
144
145	return hash_walk_new_entry(walk);
146}
147EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
148
149int crypto_ahash_walk_first(struct ahash_request *req,
150			    struct crypto_hash_walk *walk)
151{
152	walk->total = req->nbytes;
153
154	if (!walk->total) {
155		walk->entrylen = 0;
156		return 0;
157	}
158
159	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
160	walk->sg = req->src;
161	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
162	walk->flags |= CRYPTO_ALG_ASYNC;
163
164	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);
165
166	return hash_walk_new_entry(walk);
167}
168EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
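/*
 * Editor's note: a minimal sketch of the walk idiom these two entry
 * points support, modeled on shash_ahash_update() in crypto/shash.c.
 * process_block() is a hypothetical per-chunk hashing routine that
 * returns 0 on success or a negative error; its result is fed back
 * through crypto_hash_walk_done() so the walk stops on failure.
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *		nbytes = process_block(walk.data, nbytes);
 *
 *	return nbytes;
 */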
169
170static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
171				unsigned int keylen)
172{
173	unsigned long alignmask = crypto_ahash_alignmask(tfm);
174	int ret;
175	u8 *buffer, *alignbuffer;
176	unsigned long absize;
177
178	absize = keylen + alignmask;
179	buffer = kmalloc(absize, GFP_KERNEL);
180	if (!buffer)
181		return -ENOMEM;
182
183	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
184	memcpy(alignbuffer, key, keylen);
185	ret = tfm->setkey(tfm, alignbuffer, keylen);
186	kzfree(buffer);
187	return ret;
188}
189
190int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
191			unsigned int keylen)
192{
193	unsigned long alignmask = crypto_ahash_alignmask(tfm);
194
195	if ((unsigned long)key & alignmask)
196		return ahash_setkey_unaligned(tfm, key, keylen);
197
198	return tfm->setkey(tfm, key, keylen);
199}
200EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
201
202static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
203			  unsigned int keylen)
204{
205	return -ENOSYS;
206}
207
208static inline unsigned int ahash_align_buffer_size(unsigned len,
209						   unsigned long mask)
210{
211	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
212}
213
214static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
215{
216	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
217	unsigned long alignmask = crypto_ahash_alignmask(tfm);
218	unsigned int ds = crypto_ahash_digestsize(tfm);
219	struct ahash_request_priv *priv;
220
221	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
222		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
223		       GFP_KERNEL : GFP_ATOMIC);
224	if (!priv)
225		return -ENOMEM;
226
227	/*
228	 * WARNING: Voodoo programming below!
229	 *
 230	 * The code below is obscure and hard to understand, thus an explanation
231	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
232	 * to understand the layout of structures used here!
233	 *
234	 * The code here will replace portions of the ORIGINAL request with
235	 * pointers to new code and buffers so the hashing operation can store
 236	 * the result in an aligned buffer. We will call the modified request
237	 * an ADJUSTED request.
238	 *
239	 * The newly mangled request will look as such:
240	 *
241	 * req {
242	 *   .result        = ADJUSTED[new aligned buffer]
243	 *   .base.complete = ADJUSTED[pointer to completion function]
244	 *   .base.data     = ADJUSTED[*req (pointer to self)]
245	 *   .priv          = ADJUSTED[new priv] {
246	 *           .result   = ORIGINAL(result)
247	 *           .complete = ORIGINAL(base.complete)
248	 *           .data     = ORIGINAL(base.data)
249	 *   }
250	 */
251
252	priv->result = req->result;
253	priv->complete = req->base.complete;
254	priv->data = req->base.data;
255	/*
256	 * WARNING: We do not backup req->priv here! The req->priv
257	 *          is for internal use of the Crypto API and the
 258	 *          user must _NOT_ _EVER_ depend on its content!
259	 */
260
261	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
262	req->base.complete = cplt;
263	req->base.data = req;
264	req->priv = priv;
265
266	return 0;
267}
268
269static void ahash_restore_req(struct ahash_request *req)
270{
271	struct ahash_request_priv *priv = req->priv;
272
273	/* Restore the original crypto request. */
274	req->result = priv->result;
275	req->base.complete = priv->complete;
276	req->base.data = priv->data;
277	req->priv = NULL;
278
279	/* Free the req->priv.priv from the ADJUSTED request. */
280	kzfree(priv);
281}
282
283static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
284{
285	struct ahash_request_priv *priv = req->priv;
286
287	if (err == -EINPROGRESS)
288		return;
289
290	if (!err)
291		memcpy(priv->result, req->result,
292		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
293
294	ahash_restore_req(req);
295}
296
297static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
298{
299	struct ahash_request *areq = req->data;
300
301	/*
302	 * Restore the original request, see ahash_op_unaligned() for what
303	 * goes where.
304	 *
305	 * The "struct ahash_request *req" here is in fact the "req.base"
306	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
 307	 * is a pointer to self, it is also the ADJUSTED "req".
308	 */
309
310	/* First copy req->result into req->priv.result */
311	ahash_op_unaligned_finish(areq, err);
312
313	/* Complete the ORIGINAL request. */
314	areq->base.complete(&areq->base, err);
315}
316
317static int ahash_op_unaligned(struct ahash_request *req,
318			      int (*op)(struct ahash_request *))
319{
320	int err;
321
322	err = ahash_save_req(req, ahash_op_unaligned_done);
323	if (err)
324		return err;
325
326	err = op(req);
327	ahash_op_unaligned_finish(req, err);
328
329	return err;
330}
331
332static int crypto_ahash_op(struct ahash_request *req,
333			   int (*op)(struct ahash_request *))
334{
335	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
336	unsigned long alignmask = crypto_ahash_alignmask(tfm);
337
338	if ((unsigned long)req->result & alignmask)
339		return ahash_op_unaligned(req, op);
340
341	return op(req);
342}
343
344int crypto_ahash_final(struct ahash_request *req)
345{
346	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
347}
348EXPORT_SYMBOL_GPL(crypto_ahash_final);
349
350int crypto_ahash_finup(struct ahash_request *req)
351{
352	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
353}
354EXPORT_SYMBOL_GPL(crypto_ahash_finup);
355
356int crypto_ahash_digest(struct ahash_request *req)
357{
358	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
359}
360EXPORT_SYMBOL_GPL(crypto_ahash_digest);
361
362static void ahash_def_finup_finish2(struct ahash_request *req, int err)
363{
364	struct ahash_request_priv *priv = req->priv;
365
366	if (err == -EINPROGRESS)
367		return;
368
369	if (!err)
370		memcpy(priv->result, req->result,
371		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
372
373	ahash_restore_req(req);
374}
375
376static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
377{
378	struct ahash_request *areq = req->data;
379
380	ahash_def_finup_finish2(areq, err);
381
382	areq->base.complete(&areq->base, err);
383}
384
385static int ahash_def_finup_finish1(struct ahash_request *req, int err)
386{
387	if (err)
388		goto out;
389
390	req->base.complete = ahash_def_finup_done2;
391	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
392	err = crypto_ahash_reqtfm(req)->final(req);
393
394out:
395	ahash_def_finup_finish2(req, err);
396	return err;
397}
398
399static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
400{
401	struct ahash_request *areq = req->data;
402
403	err = ahash_def_finup_finish1(areq, err);
404
405	areq->base.complete(&areq->base, err);
406}
407
408static int ahash_def_finup(struct ahash_request *req)
409{
410	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
411	int err;
412
413	err = ahash_save_req(req, ahash_def_finup_done1);
414	if (err)
415		return err;
416
417	err = tfm->update(req);
418	return ahash_def_finup_finish1(req, err);
419}
420
421static int ahash_no_export(struct ahash_request *req, void *out)
422{
423	return -ENOSYS;
424}
425
426static int ahash_no_import(struct ahash_request *req, const void *in)
427{
428	return -ENOSYS;
429}
430
431static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
432{
433	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
434	struct ahash_alg *alg = crypto_ahash_alg(hash);
435
436	hash->setkey = ahash_nosetkey;
437	hash->has_setkey = false;
438	hash->export = ahash_no_export;
439	hash->import = ahash_no_import;
440
441	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
442		return crypto_init_shash_ops_async(tfm);
443
444	hash->init = alg->init;
445	hash->update = alg->update;
446	hash->final = alg->final;
447	hash->finup = alg->finup ?: ahash_def_finup;
448	hash->digest = alg->digest;
449
450	if (alg->setkey) {
451		hash->setkey = alg->setkey;
452		hash->has_setkey = true;
453	}
454	if (alg->export)
455		hash->export = alg->export;
456	if (alg->import)
457		hash->import = alg->import;
458
459	return 0;
460}
461
462static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
463{
464	if (alg->cra_type == &crypto_ahash_type)
465		return alg->cra_ctxsize;
466
467	return sizeof(struct crypto_shash *);
468}
469
470#ifdef CONFIG_NET
471static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
472{
473	struct crypto_report_hash rhash;
474
475	strncpy(rhash.type, "ahash", sizeof(rhash.type));
476
477	rhash.blocksize = alg->cra_blocksize;
478	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
479
480	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
481		    sizeof(struct crypto_report_hash), &rhash))
482		goto nla_put_failure;
483	return 0;
484
485nla_put_failure:
486	return -EMSGSIZE;
487}
488#else
489static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
490{
491	return -ENOSYS;
492}
493#endif
494
495static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
496	__attribute__ ((unused));
497static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
498{
499	seq_printf(m, "type         : ahash\n");
500	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
501					     "yes" : "no");
502	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
503	seq_printf(m, "digestsize   : %u\n",
504		   __crypto_hash_alg_common(alg)->digestsize);
505}
506
507const struct crypto_type crypto_ahash_type = {
508	.extsize = crypto_ahash_extsize,
509	.init_tfm = crypto_ahash_init_tfm,
510#ifdef CONFIG_PROC_FS
511	.show = crypto_ahash_show,
512#endif
513	.report = crypto_ahash_report,
514	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
515	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
516	.type = CRYPTO_ALG_TYPE_AHASH,
517	.tfmsize = offsetof(struct crypto_ahash, base),
518};
519EXPORT_SYMBOL_GPL(crypto_ahash_type);
520
521struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
522					u32 mask)
523{
524	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
525}
526EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
527
528int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
529{
530	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
531}
532EXPORT_SYMBOL_GPL(crypto_has_ahash);
533
534static int ahash_prepare_alg(struct ahash_alg *alg)
535{
536	struct crypto_alg *base = &alg->halg.base;
537
538	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
539	    alg->halg.statesize > PAGE_SIZE / 8 ||
540	    alg->halg.statesize == 0)
541		return -EINVAL;
542
543	base->cra_type = &crypto_ahash_type;
544	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
545	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
546
547	return 0;
548}
549
550int crypto_register_ahash(struct ahash_alg *alg)
551{
552	struct crypto_alg *base = &alg->halg.base;
553	int err;
554
555	err = ahash_prepare_alg(alg);
556	if (err)
557		return err;
558
559	return crypto_register_alg(base);
560}
561EXPORT_SYMBOL_GPL(crypto_register_ahash);
562
563int crypto_unregister_ahash(struct ahash_alg *alg)
564{
565	return crypto_unregister_alg(&alg->halg.base);
566}
567EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
568
569int ahash_register_instance(struct crypto_template *tmpl,
570			    struct ahash_instance *inst)
571{
572	int err;
573
574	err = ahash_prepare_alg(&inst->alg);
575	if (err)
576		return err;
577
578	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
579}
580EXPORT_SYMBOL_GPL(ahash_register_instance);
581
582void ahash_free_instance(struct crypto_instance *inst)
583{
584	crypto_drop_spawn(crypto_instance_ctx(inst));
585	kfree(ahash_instance(inst));
586}
587EXPORT_SYMBOL_GPL(ahash_free_instance);
588
589int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
590			    struct hash_alg_common *alg,
591			    struct crypto_instance *inst)
592{
593	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
594				  &crypto_ahash_type);
595}
596EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
597
598struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
599{
600	struct crypto_alg *alg;
601
602	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
603	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
604}
605EXPORT_SYMBOL_GPL(ahash_attr_alg);
606
607MODULE_LICENSE("GPL");
608MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
v4.17: crypto/ahash.c
  1/*
  2 * Asynchronous Cryptographic Hash operations.
  3 *
  4 * This is the asynchronous version of hash.c with notification of
  5 * completion via a callback.
  6 *
  7 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
  8 *
  9 * This program is free software; you can redistribute it and/or modify it
 10 * under the terms of the GNU General Public License as published by the Free
 11 * Software Foundation; either version 2 of the License, or (at your option)
 12 * any later version.
 13 *
 14 */
 15
 16#include <crypto/internal/hash.h>
 17#include <crypto/scatterwalk.h>
 18#include <linux/bug.h>
 19#include <linux/err.h>
 20#include <linux/kernel.h>
 21#include <linux/module.h>
 22#include <linux/sched.h>
 23#include <linux/slab.h>
 24#include <linux/seq_file.h>
 25#include <linux/cryptouser.h>
 26#include <linux/compiler.h>
 27#include <net/netlink.h>
 28
 29#include "internal.h"
 30
 31struct ahash_request_priv {
 32	crypto_completion_t complete;
 33	void *data;
 34	u8 *result;
 35	u32 flags;
 36	void *ubuf[] CRYPTO_MINALIGN_ATTR;
 37};
 38
 39static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
 40{
 41	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
 42			    halg);
 43}
 44
 45static int hash_walk_next(struct crypto_hash_walk *walk)
 46{
 47	unsigned int alignmask = walk->alignmask;
 48	unsigned int offset = walk->offset;
 49	unsigned int nbytes = min(walk->entrylen,
 50				  ((unsigned int)(PAGE_SIZE)) - offset);
 51
 52	if (walk->flags & CRYPTO_ALG_ASYNC)
 53		walk->data = kmap(walk->pg);
 54	else
 55		walk->data = kmap_atomic(walk->pg);
 56	walk->data += offset;
 57
 58	if (offset & alignmask) {
 59		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
 60
 61		if (nbytes > unaligned)
 62			nbytes = unaligned;
 63	}
 64
 65	walk->entrylen -= nbytes;
 66	return nbytes;
 67}
 68
 69static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 70{
 71	struct scatterlist *sg;
 72
 73	sg = walk->sg;
 74	walk->offset = sg->offset;
 75	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
 76	walk->offset = offset_in_page(walk->offset);
 77	walk->entrylen = sg->length;
 78
 79	if (walk->entrylen > walk->total)
 80		walk->entrylen = walk->total;
 81	walk->total -= walk->entrylen;
 82
 83	return hash_walk_next(walk);
 84}
 85
 86int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 87{
 88	unsigned int alignmask = walk->alignmask;
 89	unsigned int nbytes = walk->entrylen;
 90
 91	walk->data -= walk->offset;
 92
 93	if (nbytes && walk->offset & alignmask && !err) {
 94		walk->offset = ALIGN(walk->offset, alignmask + 1);
 95		nbytes = min(nbytes,
 96			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
 97		walk->entrylen -= nbytes;
 98
 99		if (nbytes) {
100			walk->data += walk->offset;
101			return nbytes;
102		}
103	}
104
105	if (walk->flags & CRYPTO_ALG_ASYNC)
106		kunmap(walk->pg);
107	else {
108		kunmap_atomic(walk->data);
109		/*
 110		 * The may-sleep test only makes sense for sync users.
111		 * Async users don't need to sleep here anyway.
112		 */
113		crypto_yield(walk->flags);
114	}
115
116	if (err)
117		return err;
118
119	if (nbytes) {
120		walk->offset = 0;
121		walk->pg++;
122		return hash_walk_next(walk);
123	}
124
125	if (!walk->total)
126		return 0;
127
128	walk->sg = sg_next(walk->sg);
129
130	return hash_walk_new_entry(walk);
131}
132EXPORT_SYMBOL_GPL(crypto_hash_walk_done);
133
134int crypto_hash_walk_first(struct ahash_request *req,
135			   struct crypto_hash_walk *walk)
136{
137	walk->total = req->nbytes;
138
139	if (!walk->total) {
140		walk->entrylen = 0;
141		return 0;
142	}
143
144	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
145	walk->sg = req->src;
146	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
147
148	return hash_walk_new_entry(walk);
149}
150EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
151
152int crypto_ahash_walk_first(struct ahash_request *req,
153			    struct crypto_hash_walk *walk)
154{
155	walk->total = req->nbytes;
156
157	if (!walk->total) {
158		walk->entrylen = 0;
159		return 0;
160	}
161
162	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
163	walk->sg = req->src;
164	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
165	walk->flags |= CRYPTO_ALG_ASYNC;
166
167	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);
168
169	return hash_walk_new_entry(walk);
170}
171EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
172
173static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
174				unsigned int keylen)
175{
176	unsigned long alignmask = crypto_ahash_alignmask(tfm);
177	int ret;
178	u8 *buffer, *alignbuffer;
179	unsigned long absize;
180
181	absize = keylen + alignmask;
182	buffer = kmalloc(absize, GFP_KERNEL);
183	if (!buffer)
184		return -ENOMEM;
185
186	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
187	memcpy(alignbuffer, key, keylen);
188	ret = tfm->setkey(tfm, alignbuffer, keylen);
189	kzfree(buffer);
190	return ret;
191}
192
193int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
194			unsigned int keylen)
195{
196	unsigned long alignmask = crypto_ahash_alignmask(tfm);
197	int err;
198
199	if ((unsigned long)key & alignmask)
200		err = ahash_setkey_unaligned(tfm, key, keylen);
201	else
202		err = tfm->setkey(tfm, key, keylen);
203
204	if (err)
205		return err;
206
207	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
208	return 0;
209}
210EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
211
212static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
213			  unsigned int keylen)
214{
215	return -ENOSYS;
216}
217
218static inline unsigned int ahash_align_buffer_size(unsigned len,
219						   unsigned long mask)
220{
221	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
222}
223
224static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
225{
226	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
227	unsigned long alignmask = crypto_ahash_alignmask(tfm);
228	unsigned int ds = crypto_ahash_digestsize(tfm);
229	struct ahash_request_priv *priv;
230
231	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
232		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
233		       GFP_KERNEL : GFP_ATOMIC);
234	if (!priv)
235		return -ENOMEM;
236
237	/*
238	 * WARNING: Voodoo programming below!
239	 *
 240	 * The code below is obscure and hard to understand, thus an explanation
241	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
242	 * to understand the layout of structures used here!
243	 *
244	 * The code here will replace portions of the ORIGINAL request with
245	 * pointers to new code and buffers so the hashing operation can store
 246	 * the result in an aligned buffer. We will call the modified request
247	 * an ADJUSTED request.
248	 *
249	 * The newly mangled request will look as such:
250	 *
251	 * req {
252	 *   .result        = ADJUSTED[new aligned buffer]
253	 *   .base.complete = ADJUSTED[pointer to completion function]
254	 *   .base.data     = ADJUSTED[*req (pointer to self)]
255	 *   .priv          = ADJUSTED[new priv] {
256	 *           .result   = ORIGINAL(result)
257	 *           .complete = ORIGINAL(base.complete)
258	 *           .data     = ORIGINAL(base.data)
259	 *   }
260	 */
261
262	priv->result = req->result;
263	priv->complete = req->base.complete;
264	priv->data = req->base.data;
265	priv->flags = req->base.flags;
266
267	/*
268	 * WARNING: We do not backup req->priv here! The req->priv
269	 *          is for internal use of the Crypto API and the
 270	 *          user must _NOT_ _EVER_ depend on its content!
271	 */
272
273	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
274	req->base.complete = cplt;
275	req->base.data = req;
276	req->priv = priv;
277
278	return 0;
279}
280
281static void ahash_restore_req(struct ahash_request *req, int err)
282{
283	struct ahash_request_priv *priv = req->priv;
284
285	if (!err)
286		memcpy(priv->result, req->result,
287		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
288
289	/* Restore the original crypto request. */
290	req->result = priv->result;
291
292	ahash_request_set_callback(req, priv->flags,
293				   priv->complete, priv->data);
294	req->priv = NULL;
295
296	/* Free the req->priv.priv from the ADJUSTED request. */
297	kzfree(priv);
298}
299
300static void ahash_notify_einprogress(struct ahash_request *req)
301{
302	struct ahash_request_priv *priv = req->priv;
303	struct crypto_async_request oreq;
304
305	oreq.data = priv->data;
306
307	priv->complete(&oreq, -EINPROGRESS);
308}
309
310static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
311{
312	struct ahash_request *areq = req->data;
313
314	if (err == -EINPROGRESS) {
315		ahash_notify_einprogress(areq);
316		return;
317	}
318
319	/*
320	 * Restore the original request, see ahash_op_unaligned() for what
321	 * goes where.
322	 *
323	 * The "struct ahash_request *req" here is in fact the "req.base"
324	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
 325	 * is a pointer to self, it is also the ADJUSTED "req".
326	 */
327
328	/* First copy req->result into req->priv.result */
329	ahash_restore_req(areq, err);
330
331	/* Complete the ORIGINAL request. */
332	areq->base.complete(&areq->base, err);
333}
334
335static int ahash_op_unaligned(struct ahash_request *req,
336			      int (*op)(struct ahash_request *))
337{
338	int err;
339
340	err = ahash_save_req(req, ahash_op_unaligned_done);
341	if (err)
342		return err;
343
344	err = op(req);
345	if (err == -EINPROGRESS || err == -EBUSY)
346		return err;
347
348	ahash_restore_req(req, err);
349
350	return err;
351}
352
353static int crypto_ahash_op(struct ahash_request *req,
354			   int (*op)(struct ahash_request *))
355{
356	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
357	unsigned long alignmask = crypto_ahash_alignmask(tfm);
358
359	if ((unsigned long)req->result & alignmask)
360		return ahash_op_unaligned(req, op);
361
362	return op(req);
363}
364
365int crypto_ahash_final(struct ahash_request *req)
366{
367	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
368}
369EXPORT_SYMBOL_GPL(crypto_ahash_final);
370
371int crypto_ahash_finup(struct ahash_request *req)
372{
373	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
374}
375EXPORT_SYMBOL_GPL(crypto_ahash_finup);
376
377int crypto_ahash_digest(struct ahash_request *req)
378{
379	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
380
381	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
382		return -ENOKEY;
383
384	return crypto_ahash_op(req, tfm->digest);
385}
386EXPORT_SYMBOL_GPL(crypto_ahash_digest);
387
388static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
389{
390	struct ahash_request *areq = req->data;
391
392	if (err == -EINPROGRESS)
393		return;
394
395	ahash_restore_req(areq, err);
396
397	areq->base.complete(&areq->base, err);
398}
399
400static int ahash_def_finup_finish1(struct ahash_request *req, int err)
401{
402	if (err)
403		goto out;
404
405	req->base.complete = ahash_def_finup_done2;
406
407	err = crypto_ahash_reqtfm(req)->final(req);
408	if (err == -EINPROGRESS || err == -EBUSY)
409		return err;
410
411out:
412	ahash_restore_req(req, err);
413	return err;
414}
415
416static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
417{
418	struct ahash_request *areq = req->data;
419
420	if (err == -EINPROGRESS) {
421		ahash_notify_einprogress(areq);
422		return;
423	}
424
425	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
426
427	err = ahash_def_finup_finish1(areq, err);
428	if (areq->priv)
429		return;
430
431	areq->base.complete(&areq->base, err);
432}
433
434static int ahash_def_finup(struct ahash_request *req)
435{
436	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
437	int err;
438
439	err = ahash_save_req(req, ahash_def_finup_done1);
440	if (err)
441		return err;
442
443	err = tfm->update(req);
444	if (err == -EINPROGRESS || err == -EBUSY)
445		return err;
446
447	return ahash_def_finup_finish1(req, err);
448}
449
450static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
451{
452	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
453	struct ahash_alg *alg = crypto_ahash_alg(hash);
454
455	hash->setkey = ahash_nosetkey;
456
457	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
458		return crypto_init_shash_ops_async(tfm);
459
460	hash->init = alg->init;
461	hash->update = alg->update;
462	hash->final = alg->final;
463	hash->finup = alg->finup ?: ahash_def_finup;
464	hash->digest = alg->digest;
465	hash->export = alg->export;
466	hash->import = alg->import;
467
468	if (alg->setkey) {
469		hash->setkey = alg->setkey;
470		if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
471			crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
472	}
473
474	return 0;
475}
476
477static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
478{
479	if (alg->cra_type != &crypto_ahash_type)
480		return sizeof(struct crypto_shash *);
481
482	return crypto_alg_extsize(alg);
483}
484
485#ifdef CONFIG_NET
486static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
487{
488	struct crypto_report_hash rhash;
489
490	strncpy(rhash.type, "ahash", sizeof(rhash.type));
491
492	rhash.blocksize = alg->cra_blocksize;
493	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
494
495	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
496		    sizeof(struct crypto_report_hash), &rhash))
497		goto nla_put_failure;
498	return 0;
499
500nla_put_failure:
501	return -EMSGSIZE;
502}
503#else
504static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
505{
506	return -ENOSYS;
507}
508#endif
509
510static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
511	__maybe_unused;
512static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
513{
514	seq_printf(m, "type         : ahash\n");
515	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
516					     "yes" : "no");
517	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
518	seq_printf(m, "digestsize   : %u\n",
519		   __crypto_hash_alg_common(alg)->digestsize);
520}
521
522const struct crypto_type crypto_ahash_type = {
523	.extsize = crypto_ahash_extsize,
524	.init_tfm = crypto_ahash_init_tfm,
525#ifdef CONFIG_PROC_FS
526	.show = crypto_ahash_show,
527#endif
528	.report = crypto_ahash_report,
529	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
530	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
531	.type = CRYPTO_ALG_TYPE_AHASH,
532	.tfmsize = offsetof(struct crypto_ahash, base),
533};
534EXPORT_SYMBOL_GPL(crypto_ahash_type);
535
536struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
537					u32 mask)
538{
539	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
540}
541EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
542
543int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
544{
545	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
546}
547EXPORT_SYMBOL_GPL(crypto_has_ahash);
548
549static int ahash_prepare_alg(struct ahash_alg *alg)
550{
551	struct crypto_alg *base = &alg->halg.base;
552
553	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
554	    alg->halg.statesize > PAGE_SIZE / 8 ||
555	    alg->halg.statesize == 0)
556		return -EINVAL;
557
558	base->cra_type = &crypto_ahash_type;
559	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
560	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;
561
562	return 0;
563}
564
565int crypto_register_ahash(struct ahash_alg *alg)
566{
567	struct crypto_alg *base = &alg->halg.base;
568	int err;
569
570	err = ahash_prepare_alg(alg);
571	if (err)
572		return err;
573
574	return crypto_register_alg(base);
575}
576EXPORT_SYMBOL_GPL(crypto_register_ahash);
577
578int crypto_unregister_ahash(struct ahash_alg *alg)
579{
580	return crypto_unregister_alg(&alg->halg.base);
581}
582EXPORT_SYMBOL_GPL(crypto_unregister_ahash);
583
584int crypto_register_ahashes(struct ahash_alg *algs, int count)
585{
586	int i, ret;
587
588	for (i = 0; i < count; i++) {
589		ret = crypto_register_ahash(&algs[i]);
590		if (ret)
591			goto err;
592	}
593
594	return 0;
595
596err:
597	for (--i; i >= 0; --i)
598		crypto_unregister_ahash(&algs[i]);
599
600	return ret;
601}
602EXPORT_SYMBOL_GPL(crypto_register_ahashes);
603
604void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
605{
606	int i;
607
608	for (i = count - 1; i >= 0; --i)
609		crypto_unregister_ahash(&algs[i]);
610}
611EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);
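/*
 * Editor's note: a hedged sketch of the ahash_alg a driver hands to
 * crypto_register_ahash()/crypto_register_ahashes(). All "example_*"
 * names are hypothetical. Note that ahash_prepare_alg() above rejects
 * a zero statesize, and cra_flags need not include
 * CRYPTO_ALG_TYPE_AHASH because ahash_prepare_alg() sets it.
 *
 *	static struct ahash_alg example_sha256_alg = {
 *		.init   = example_init,
 *		.update = example_update,
 *		.final  = example_final,
 *		.digest = example_digest,
 *		.export = example_export,
 *		.import = example_import,
 *		.halg = {
 *			.digestsize = SHA256_DIGEST_SIZE,
 *			.statesize  = sizeof(struct example_hash_state),
 *			.base = {
 *				.cra_name        = "sha256",
 *				.cra_driver_name = "sha256-example",
 *				.cra_priority    = 300,
 *				.cra_flags       = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize   = SHA256_BLOCK_SIZE,
 *				.cra_ctxsize     = sizeof(struct example_tfm_ctx),
 *				.cra_module      = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 * module_init() would then call crypto_register_ahash(&example_sha256_alg)
 * and module_exit() crypto_unregister_ahash(&example_sha256_alg).
 */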
612
613int ahash_register_instance(struct crypto_template *tmpl,
614			    struct ahash_instance *inst)
615{
616	int err;
617
618	err = ahash_prepare_alg(&inst->alg);
619	if (err)
620		return err;
621
622	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
623}
624EXPORT_SYMBOL_GPL(ahash_register_instance);
625
626void ahash_free_instance(struct crypto_instance *inst)
627{
628	crypto_drop_spawn(crypto_instance_ctx(inst));
629	kfree(ahash_instance(inst));
630}
631EXPORT_SYMBOL_GPL(ahash_free_instance);
632
633int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
634			    struct hash_alg_common *alg,
635			    struct crypto_instance *inst)
636{
637	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
638				  &crypto_ahash_type);
639}
640EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);
641
642struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
643{
644	struct crypto_alg *alg;
645
646	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
647	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
648}
649EXPORT_SYMBOL_GPL(ahash_attr_alg);
650
651bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
652{
653	struct crypto_alg *alg = &halg->base;
654
655	if (alg->cra_type != &crypto_ahash_type)
656		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));
657
658	return __crypto_ahash_alg(alg)->setkey != NULL;
659}
660EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);
661
662MODULE_LICENSE("GPL");
663MODULE_DESCRIPTION("Asynchronous cryptographic hash type");