v6.2
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

static const struct crypto_type crypto_ahash_type;

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	kunmap_atomic(walk->data);
	crypto_yield(walk->flags);

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
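
/*
 * Illustrative sketch, not part of the original file: a driver's
 * ->update() implementation typically consumes req->src with the walk
 * helpers above, e.g.
 *
 *	struct crypto_hash_walk walk;
 *	int nbytes;
 *
 *	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
 *	     nbytes = crypto_hash_walk_done(&walk, 0))
 *		process(walk.data, nbytes);
 *
 * where process() stands in for the driver's own block function.
 * crypto_hash_walk_done() unmaps the current chunk and returns the size
 * of the next one, 0 once the request is exhausted, or a negative error.
 */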

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}
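
/*
 * Worked example, with assumed values: if alignmask is 63 and
 * crypto_tfm_ctx_alignment() returns 8, ahash_align_buffer_size()
 * reserves len + (63 & ~7) = len + 56 extra bytes. That is the
 * worst-case padding PTR_ALIGN() in ahash_save_req() below needs to
 * carve a 64-byte-aligned result buffer out of the (here) 8-byte
 * aligned priv->ubuf.
 */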

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, so an
	 * explanation is necessary. See include/crypto/hash.h and
	 * include/linux/crypto.h to understand the layout of the
	 * structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look like this:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kfree_sensitive(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request; see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), and since it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ? alg->init_tfm(hash) : 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
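
A minimal caller-side sketch (an illustration added here, not part of the
file above): driving the exported ahash entry points synchronously through
the stock crypto_wait_req()/DECLARE_CRYPTO_WAIT helpers. The function name,
the choice of "sha256", and the buffer handling are assumptions.

#include <crypto/hash.h>
#include <linux/scatterlist.h>

/* data must be addressable by a scatterlist (e.g. kmalloc'd), not stack memory */
static int example_ahash_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);	/* assumed algorithm */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* crypto_ahash_digest() may return -EINPROGRESS or -EBUSY;
	 * crypto_wait_req() turns that into a blocking wait. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
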
v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <lho@amcc.com>
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include "internal.h"

static const struct crypto_type crypto_ahash_type;

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	u32 flags;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->offset = sg->offset;
	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
	walk->offset = offset_in_page(walk->offset);
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;

	walk->data -= walk->offset;

	if (walk->entrylen && (walk->offset & alignmask) && !err) {
		unsigned int nbytes;

		walk->offset = ALIGN(walk->offset, alignmask + 1);
		nbytes = min(walk->entrylen,
			     (unsigned int)(PAGE_SIZE - walk->offset));
		if (nbytes) {
			walk->entrylen -= nbytes;
			walk->data += walk->offset;
			return nbytes;
		}
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may-sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (walk->entrylen) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static void ahash_set_needkey(struct crypto_ahash *tfm)
{
	const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);

	if (tfm->setkey != ahash_nosetkey &&
	    !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int err;

	if ((unsigned long)key & alignmask)
		err = ahash_setkey_unaligned(tfm, key, keylen);
	else
		err = tfm->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		ahash_set_needkey(tfm);
		return err;
	}

	crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, so an
	 * explanation is necessary. See include/crypto/hash.h and
	 * include/linux/crypto.h to understand the layout of the
	 * structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in an aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look like this:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	priv->flags = req->base.flags;

	/*
	 * WARNING: We do not back up req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on its content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	/* Restore the original crypto request. */
	req->result = priv->result;

	ahash_request_set_callback(req, priv->flags,
				   priv->complete, priv->data);
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kfree_sensitive(priv);
}

static void ahash_notify_einprogress(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;
	struct crypto_async_request oreq;

	oreq.data = priv->data;

	priv->complete(&oreq, -EINPROGRESS);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	/*
	 * Restore the original request; see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), and since it
	 * is a pointer to self, it is also the ADJUSTED "req".
	 */

	/* First copy req->result into req->priv.result */
	ahash_restore_req(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	ahash_restore_req(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int nbytes = req->nbytes;
	int ret;

	crypto_stats_get(alg);
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_ahash_op(req, tfm->digest);
	crypto_stats_ahash_final(nbytes, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS)
		return;

	ahash_restore_req(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;

	err = crypto_ahash_reqtfm(req)->final(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

out:
	ahash_restore_req(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	if (err == -EINPROGRESS) {
		ahash_notify_einprogress(areq);
		return;
	}

	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	err = ahash_def_finup_finish1(areq, err);
	if (areq->priv)
		return;

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	if (err == -EINPROGRESS || err == -EBUSY)
		return err;

	return ahash_def_finup_finish1(req, err);
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;
	hash->export = alg->export;
	hash->import = alg->import;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		ahash_set_needkey(hash);
	}

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_ahash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
	    alg->halg.statesize > HASH_MAX_STATESIZE ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type != &crypto_ahash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != NULL;
}
EXPORT_SYMBOL_GPL(crypto_hash_alg_has_setkey);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
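
The provider side, for completeness: a hedged sketch of registering a toy
ahash implementation against the v6.2 interface shown at the top (where
ahash_alg carries an init_tfm() hook). Everything named toysum, including
the trivial byte-sum "digest", is invented for illustration; the
constraints it satisfies (nonzero halg.statesize, digestsize within
HASH_MAX_DIGESTSIZE) are the ones ahash_prepare_alg() enforces.

#include <crypto/internal/hash.h>
#include <linux/module.h>
#include <asm/unaligned.h>

struct toysum_ctx {	/* per-request state behind ahash_request_ctx() */
	u32 sum;
};

static int toysum_init(struct ahash_request *req)
{
	struct toysum_ctx *ctx = ahash_request_ctx(req);

	ctx->sum = 0;
	return 0;
}

static int toysum_update(struct ahash_request *req)
{
	struct toysum_ctx *ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int nbytes;

	/* page-by-page walk over req->src using the helpers exported above */
	for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0)) {
		const u8 *p = walk.data;
		int i;

		for (i = 0; i < nbytes; i++)
			ctx->sum += p[i];
	}
	return nbytes;	/* 0 on success, negative walk error otherwise */
}

static int toysum_final(struct ahash_request *req)
{
	struct toysum_ctx *ctx = ahash_request_ctx(req);

	put_unaligned_le32(ctx->sum, req->result);
	return 0;
}

static int toysum_digest(struct ahash_request *req)
{
	return toysum_init(req) ?: toysum_update(req) ?: toysum_final(req);
}

static int toysum_export(struct ahash_request *req, void *out)
{
	memcpy(out, ahash_request_ctx(req), sizeof(struct toysum_ctx));
	return 0;
}

static int toysum_import(struct ahash_request *req, const void *in)
{
	memcpy(ahash_request_ctx(req), in, sizeof(struct toysum_ctx));
	return 0;
}

static int toysum_init_tfm(struct crypto_ahash *tfm)
{
	crypto_ahash_set_reqsize(tfm, sizeof(struct toysum_ctx));
	return 0;
}

static struct ahash_alg toysum_alg = {
	.init		= toysum_init,
	.update		= toysum_update,
	.final		= toysum_final,
	.digest		= toysum_digest,
	.export		= toysum_export,
	.import		= toysum_import,
	.init_tfm	= toysum_init_tfm,
	.halg = {
		.digestsize	= 4,
		.statesize	= sizeof(struct toysum_ctx),	/* must be nonzero */
		.base = {
			.cra_name		= "toysum",
			.cra_driver_name	= "toysum-generic",
			.cra_priority		= 100,
			.cra_blocksize		= 1,
			.cra_module		= THIS_MODULE,
		},
	},
};

static int __init toysum_mod_init(void)
{
	return crypto_register_ahash(&toysum_alg);
}

static void __exit toysum_mod_exit(void)
{
	crypto_unregister_ahash(&toysum_alg);
}

module_init(toysum_mod_init);
module_exit(toysum_mod_exit);
MODULE_LICENSE("GPL");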