crypto/pcrypt.c (Linux v3.15):

  1/*
  2 * pcrypt - Parallel crypto wrapper.
  3 *
  4 * Copyright (C) 2009 secunet Security Networks AG
  5 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
  6 *
  7 * This program is free software; you can redistribute it and/or modify it
  8 * under the terms and conditions of the GNU General Public License,
  9 * version 2, as published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope it will be useful, but WITHOUT
 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 14 * more details.
 15 *
 16 * You should have received a copy of the GNU General Public License along with
 17 * this program; if not, write to the Free Software Foundation, Inc.,
 18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 19 */
 20
 21#include <crypto/algapi.h>
 22#include <crypto/internal/aead.h>
 23#include <linux/err.h>
 24#include <linux/init.h>
 25#include <linux/module.h>
 26#include <linux/slab.h>
 27#include <linux/notifier.h>
 28#include <linux/kobject.h>
 29#include <linux/cpu.h>
 30#include <crypto/pcrypt.h>
 31
 32struct padata_pcrypt {
 33	struct padata_instance *pinst;
 34	struct workqueue_struct *wq;
 35
 36	/*
 37	 * Cpumask for callback CPUs. It should be
 38	 * equal to the serial cpumask of the corresponding padata instance,
 39	 * so it is updated when padata notifies us about a serial
 40	 * cpumask change.
 41	 *
 42	 * cb_cpumask is protected by RCU. This prevents us from
 43	 * using cpumask_var_t directly because the actual type of
 44	 * cpumask_var_t depends on the kernel configuration (particularly on
 45	 * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration,
 46	 * cpumask_var_t may be either a pointer to struct cpumask
 47	 * or a variable allocated on the stack. Thus we cannot safely use
 48	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
 49	 * rcu_dereference. So cpumask_var_t is wrapped in struct
 50	 * pcrypt_cpumask, which makes it possible to use it with RCU.
 51	 */
 52	struct pcrypt_cpumask {
 53		cpumask_var_t mask;
 54	} *cb_cpumask;
 55	struct notifier_block nblock;
 56};
 57
 58static struct padata_pcrypt pencrypt;
 59static struct padata_pcrypt pdecrypt;
 60static struct kset           *pcrypt_kset;
 61
 62struct pcrypt_instance_ctx {
 63	struct crypto_spawn spawn;
 64	unsigned int tfm_count;
 65};
 66
 67struct pcrypt_aead_ctx {
 68	struct crypto_aead *child;
 69	unsigned int cb_cpu;
 70};
 71
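/*
 * Pick the CPU that will run the serial completion callback: keep the
 * caller's preferred CPU if it lies in the instance's serial cpumask;
 * otherwise, if that cpumask is non-empty, remap it onto the cpumask
 * (cpu % weight, counting from the first set bit) before handing the
 * request to padata_do_parallel().
 */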
 72static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
 73			      struct padata_pcrypt *pcrypt)
 74{
 75	unsigned int cpu_index, cpu, i;
 76	struct pcrypt_cpumask *cpumask;
 77
 78	cpu = *cb_cpu;
 79
 80	rcu_read_lock_bh();
 81	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
 82	if (cpumask_test_cpu(cpu, cpumask->mask))
 83		goto out;
 84
 85	if (!cpumask_weight(cpumask->mask))
 86		goto out;
 87
 88	cpu_index = cpu % cpumask_weight(cpumask->mask);
 89
 90	cpu = cpumask_first(cpumask->mask);
 91	for (i = 0; i < cpu_index; i++)
 92		cpu = cpumask_next(cpu, cpumask->mask);
 93
 94	*cb_cpu = cpu;
 95
 96out:
 97	rcu_read_unlock_bh();
 98	return padata_do_parallel(pcrypt->pinst, padata, cpu);
 99}
100
101static int pcrypt_aead_setkey(struct crypto_aead *parent,
102			      const u8 *key, unsigned int keylen)
103{
104	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
105
106	return crypto_aead_setkey(ctx->child, key, keylen);
107}
108
109static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
110				   unsigned int authsize)
111{
112	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
113
114	return crypto_aead_setauthsize(ctx->child, authsize);
115}
116
117static void pcrypt_aead_serial(struct padata_priv *padata)
118{
119	struct pcrypt_request *preq = pcrypt_padata_request(padata);
120	struct aead_request *req = pcrypt_request_ctx(preq);
121
122	aead_request_complete(req->base.data, padata->info);
123}
124
125static void pcrypt_aead_giv_serial(struct padata_priv *padata)
126{
127	struct pcrypt_request *preq = pcrypt_padata_request(padata);
128	struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
129
130	aead_request_complete(req->areq.base.data, padata->info);
131}
132
133static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
134{
135	struct aead_request *req = areq->data;
136	struct pcrypt_request *preq = aead_request_ctx(req);
137	struct padata_priv *padata = pcrypt_request_padata(preq);
138
139	padata->info = err;
140	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
141
142	padata_do_serial(padata);
143}
144
145static void pcrypt_aead_enc(struct padata_priv *padata)
146{
147	struct pcrypt_request *preq = pcrypt_padata_request(padata);
148	struct aead_request *req = pcrypt_request_ctx(preq);
149
150	padata->info = crypto_aead_encrypt(req);
151
152	if (padata->info == -EINPROGRESS)
153		return;
154
155	padata_do_serial(padata);
156}
157
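/*
 * Queue an encryption request for parallel processing: a request for
 * the child AEAD is built in the area reserved by pcrypt_aead_init_tfm(),
 * dispatched through the global pencrypt padata instance, executed by
 * pcrypt_aead_enc() on a worker CPU and completed in submission order
 * by pcrypt_aead_serial() on the chosen callback CPU.
 */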
158static int pcrypt_aead_encrypt(struct aead_request *req)
159{
160	int err;
161	struct pcrypt_request *preq = aead_request_ctx(req);
162	struct aead_request *creq = pcrypt_request_ctx(preq);
163	struct padata_priv *padata = pcrypt_request_padata(preq);
164	struct crypto_aead *aead = crypto_aead_reqtfm(req);
165	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
166	u32 flags = aead_request_flags(req);
167
168	memset(padata, 0, sizeof(struct padata_priv));
169
170	padata->parallel = pcrypt_aead_enc;
171	padata->serial = pcrypt_aead_serial;
172
173	aead_request_set_tfm(creq, ctx->child);
174	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
175				  pcrypt_aead_done, req);
176	aead_request_set_crypt(creq, req->src, req->dst,
177			       req->cryptlen, req->iv);
178	aead_request_set_assoc(creq, req->assoc, req->assoclen);
179
180	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
181	if (!err)
182		return -EINPROGRESS;
183
184	return err;
185}
186
187static void pcrypt_aead_dec(struct padata_priv *padata)
188{
189	struct pcrypt_request *preq = pcrypt_padata_request(padata);
190	struct aead_request *req = pcrypt_request_ctx(preq);
191
192	padata->info = crypto_aead_decrypt(req);
193
194	if (padata->info == -EINPROGRESS)
195		return;
196
197	padata_do_serial(padata);
198}
199
200static int pcrypt_aead_decrypt(struct aead_request *req)
201{
202	int err;
203	struct pcrypt_request *preq = aead_request_ctx(req);
204	struct aead_request *creq = pcrypt_request_ctx(preq);
205	struct padata_priv *padata = pcrypt_request_padata(preq);
206	struct crypto_aead *aead = crypto_aead_reqtfm(req);
207	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
208	u32 flags = aead_request_flags(req);
209
210	memset(padata, 0, sizeof(struct padata_priv));
211
212	padata->parallel = pcrypt_aead_dec;
213	padata->serial = pcrypt_aead_serial;
214
215	aead_request_set_tfm(creq, ctx->child);
216	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
217				  pcrypt_aead_done, req);
218	aead_request_set_crypt(creq, req->src, req->dst,
219			       req->cryptlen, req->iv);
220	aead_request_set_assoc(creq, req->assoc, req->assoclen);
221
222	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
223	if (!err)
224		return -EINPROGRESS;
225
226	return err;
227}
228
229static void pcrypt_aead_givenc(struct padata_priv *padata)
230{
231	struct pcrypt_request *preq = pcrypt_padata_request(padata);
232	struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
233
234	padata->info = crypto_aead_givencrypt(req);
235
236	if (padata->info == -EINPROGRESS)
237		return;
238
239	padata_do_serial(padata);
240}
241
242static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
243{
244	int err;
245	struct aead_request *areq = &req->areq;
246	struct pcrypt_request *preq = aead_request_ctx(areq);
247	struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
248	struct padata_priv *padata = pcrypt_request_padata(preq);
249	struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
250	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
251	u32 flags = aead_request_flags(areq);
252
253	memset(padata, 0, sizeof(struct padata_priv));
254
255	padata->parallel = pcrypt_aead_givenc;
256	padata->serial = pcrypt_aead_giv_serial;
257
258	aead_givcrypt_set_tfm(creq, ctx->child);
259	aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
260				   pcrypt_aead_done, areq);
261	aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
262				areq->cryptlen, areq->iv);
263	aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
264	aead_givcrypt_set_giv(creq, req->giv, req->seq);
265
266	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
267	if (!err)
268		return -EINPROGRESS;
269
270	return err;
271}
272
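/*
 * Spread transforms over the online CPUs: each new tfm is assigned the
 * next callback CPU in round-robin order, and its request size covers
 * the pcrypt bookkeeping plus the child AEAD's own request.
 */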
273static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
274{
275	int cpu, cpu_index;
276	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
277	struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
278	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
279	struct crypto_aead *cipher;
280
281	ictx->tfm_count++;
282
283	cpu_index = ictx->tfm_count % cpumask_weight(cpu_online_mask);
284
285	ctx->cb_cpu = cpumask_first(cpu_online_mask);
286	for (cpu = 0; cpu < cpu_index; cpu++)
287		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
288
289	cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
290
291	if (IS_ERR(cipher))
292		return PTR_ERR(cipher);
293
294	ctx->child = cipher;
295	tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
296		+ sizeof(struct aead_givcrypt_request)
297		+ crypto_aead_reqsize(cipher);
298
299	return 0;
300}
301
302static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
303{
304	struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
305
306	crypto_free_aead(ctx->child);
307}
308
309static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
310{
311	struct crypto_instance *inst;
312	struct pcrypt_instance_ctx *ctx;
313	int err;
314
315	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
316	if (!inst) {
317		inst = ERR_PTR(-ENOMEM);
318		goto out;
319	}
320
321	err = -ENAMETOOLONG;
322	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
323		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
324		goto out_free_inst;
325
326	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
327
328	ctx = crypto_instance_ctx(inst);
329	err = crypto_init_spawn(&ctx->spawn, alg, inst,
330				CRYPTO_ALG_TYPE_MASK);
331	if (err)
332		goto out_free_inst;
333
334	inst->alg.cra_priority = alg->cra_priority + 100;
335	inst->alg.cra_blocksize = alg->cra_blocksize;
336	inst->alg.cra_alignmask = alg->cra_alignmask;
337
338out:
339	return inst;
340
341out_free_inst:
342	kfree(inst);
343	inst = ERR_PTR(err);
344	goto out;
345}
346
347static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb,
348						 u32 type, u32 mask)
349{
350	struct crypto_instance *inst;
351	struct crypto_alg *alg;
352
353	alg = crypto_get_attr_alg(tb, type, (mask & CRYPTO_ALG_TYPE_MASK));
354	if (IS_ERR(alg))
355		return ERR_CAST(alg);
356
357	inst = pcrypt_alloc_instance(alg);
358	if (IS_ERR(inst))
359		goto out_put_alg;
360
361	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
362	inst->alg.cra_type = &crypto_aead_type;
363
364	inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
365	inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
366	inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
367
368	inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
369
370	inst->alg.cra_init = pcrypt_aead_init_tfm;
371	inst->alg.cra_exit = pcrypt_aead_exit_tfm;
372
373	inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
374	inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
375	inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
376	inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
377	inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;
378
379out_put_alg:
380	crypto_mod_put(alg);
381	return inst;
382}
383
384static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
385{
386	struct crypto_attr_type *algt;
387
388	algt = crypto_get_attr_type(tb);
389	if (IS_ERR(algt))
390		return ERR_CAST(algt);
391
392	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
393	case CRYPTO_ALG_TYPE_AEAD:
394		return pcrypt_alloc_aead(tb, algt->type, algt->mask);
395	}
396
397	return ERR_PTR(-EINVAL);
398}
399
400static void pcrypt_free(struct crypto_instance *inst)
401{
402	struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
403
404	crypto_drop_spawn(&ctx->spawn);
405	kfree(inst);
406}
407
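/*
 * padata notifier: when the serial cpumask of the underlying padata
 * instance changes, publish a fresh copy of it via RCU so that
 * pcrypt_do_parallel() always sees a consistent callback cpumask.
 */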
408static int pcrypt_cpumask_change_notify(struct notifier_block *self,
409					unsigned long val, void *data)
410{
411	struct padata_pcrypt *pcrypt;
412	struct pcrypt_cpumask *new_mask, *old_mask;
413	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;
414
415	if (!(val & PADATA_CPU_SERIAL))
416		return 0;
417
418	pcrypt = container_of(self, struct padata_pcrypt, nblock);
419	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
420	if (!new_mask)
421		return -ENOMEM;
422	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
423		kfree(new_mask);
424		return -ENOMEM;
425	}
426
427	old_mask = pcrypt->cb_cpumask;
428
429	cpumask_copy(new_mask->mask, cpumask->cbcpu);
430	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
431	synchronize_rcu_bh();
432
433	free_cpumask_var(old_mask->mask);
434	kfree(old_mask);
435	return 0;
436}
437
438static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
439{
440	int ret;
441
442	pinst->kobj.kset = pcrypt_kset;
443	ret = kobject_add(&pinst->kobj, NULL, name);
444	if (!ret)
445		kobject_uevent(&pinst->kobj, KOBJ_ADD);
446
447	return ret;
448}
449
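/*
 * Set up one padata instance (pencrypt or pdecrypt): allocate its
 * workqueue and padata instance, seed the callback cpumask with the
 * currently online CPUs, register the cpumask-change notifier and
 * expose the instance under the pcrypt kset in sysfs.
 */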
450static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
451			      const char *name)
452{
453	int ret = -ENOMEM;
454	struct pcrypt_cpumask *mask;
455
456	get_online_cpus();
457
458	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
459				     1, name);
460	if (!pcrypt->wq)
461		goto err;
462
463	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
464	if (!pcrypt->pinst)
465		goto err_destroy_workqueue;
466
467	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
468	if (!mask)
469		goto err_free_padata;
470	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
471		kfree(mask);
472		goto err_free_padata;
473	}
474
475	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
476	rcu_assign_pointer(pcrypt->cb_cpumask, mask);
477
478	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
479	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
480	if (ret)
481		goto err_free_cpumask;
482
483	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
484	if (ret)
485		goto err_unregister_notifier;
486
487	put_online_cpus();
488
489	return ret;
490
491err_unregister_notifier:
492	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
493err_free_cpumask:
494	free_cpumask_var(mask->mask);
495	kfree(mask);
496err_free_padata:
497	padata_free(pcrypt->pinst);
498err_destroy_workqueue:
499	destroy_workqueue(pcrypt->wq);
500err:
501	put_online_cpus();
502
503	return ret;
504}
505
506static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
507{
508	free_cpumask_var(pcrypt->cb_cpumask->mask);
509	kfree(pcrypt->cb_cpumask);
510
511	padata_stop(pcrypt->pinst);
512	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
513	destroy_workqueue(pcrypt->wq);
514	padata_free(pcrypt->pinst);
515}
516
517static struct crypto_template pcrypt_tmpl = {
518	.name = "pcrypt",
519	.alloc = pcrypt_alloc,
520	.free = pcrypt_free,
521	.module = THIS_MODULE,
522};
523
524static int __init pcrypt_init(void)
525{
526	int err = -ENOMEM;
527
528	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
529	if (!pcrypt_kset)
530		goto err;
531
532	err = pcrypt_init_padata(&pencrypt, "pencrypt");
533	if (err)
534		goto err_unreg_kset;
535
536	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
537	if (err)
538		goto err_deinit_pencrypt;
539
540	padata_start(pencrypt.pinst);
541	padata_start(pdecrypt.pinst);
542
543	return crypto_register_template(&pcrypt_tmpl);
544
545err_deinit_pencrypt:
546	pcrypt_fini_padata(&pencrypt);
547err_unreg_kset:
548	kset_unregister(pcrypt_kset);
549err:
550	return err;
551}
552
553static void __exit pcrypt_exit(void)
554{
555	pcrypt_fini_padata(&pencrypt);
556	pcrypt_fini_padata(&pdecrypt);
557
558	kset_unregister(pcrypt_kset);
559	crypto_unregister_template(&pcrypt_tmpl);
560}
561
562module_init(pcrypt_init);
563module_exit(pcrypt_exit);
564
565MODULE_LICENSE("GPL");
566MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
567MODULE_DESCRIPTION("Parallel crypto wrapper");
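
For context, pcrypt is reached through the regular crypto API by wrapping an
existing AEAD in the "pcrypt" template. The following is a minimal usage
sketch, not part of pcrypt.c, and it assumes the wrapped GCM implementation
is available on the system:

	struct crypto_aead *tfm;

	/* Instantiate the pcrypt template around an existing AEAD. */
	tfm = crypto_alloc_aead("pcrypt(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/*
	 * ... set key and authsize, then submit aead_requests as usual;
	 * encryption and decryption are parallelized transparently ...
	 */

	crypto_free_aead(tfm);

The serial (callback) cpumasks of the shared pencrypt/pdecrypt instances can
be tuned at run time through the sysfs objects registered above, e.g.
/sys/kernel/pcrypt/pencrypt/serial_cpumask, which is what ends up triggering
pcrypt_cpumask_change_notify().
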
crypto/pcrypt.c (Linux v6.2):

  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * pcrypt - Parallel crypto wrapper.
  4 *
  5 * Copyright (C) 2009 secunet Security Networks AG
  6 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
  7 */
  8
  9#include <crypto/algapi.h>
 10#include <crypto/internal/aead.h>
 11#include <linux/atomic.h>
 12#include <linux/err.h>
 13#include <linux/init.h>
 14#include <linux/module.h>
 15#include <linux/slab.h>
 16#include <linux/kobject.h>
 17#include <linux/cpu.h>
 18#include <crypto/pcrypt.h>
 19
 20static struct padata_instance *pencrypt;
 21static struct padata_instance *pdecrypt;
 22static struct kset           *pcrypt_kset;
 23
 24struct pcrypt_instance_ctx {
 25	struct crypto_aead_spawn spawn;
 26	struct padata_shell *psenc;
 27	struct padata_shell *psdec;
 28	atomic_t tfm_count;
 29};
 30
 31struct pcrypt_aead_ctx {
 32	struct crypto_aead *child;
 33	unsigned int cb_cpu;
 34};
 35
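/*
 * In this version the padata state is per template instance: the instance
 * context carries separate padata shells for encryption and decryption,
 * and this helper fetches that context from a transform.
 */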
 36static inline struct pcrypt_instance_ctx *pcrypt_tfm_ictx(
 37	struct crypto_aead *tfm)
 38{
 39	return aead_instance_ctx(aead_alg_instance(tfm));
 40}
 41
 42static int pcrypt_aead_setkey(struct crypto_aead *parent,
 43			      const u8 *key, unsigned int keylen)
 44{
 45	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
 46
 47	return crypto_aead_setkey(ctx->child, key, keylen);
 48}
 49
 50static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
 51				   unsigned int authsize)
 52{
 53	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
 54
 55	return crypto_aead_setauthsize(ctx->child, authsize);
 56}
 57
 58static void pcrypt_aead_serial(struct padata_priv *padata)
 59{
 60	struct pcrypt_request *preq = pcrypt_padata_request(padata);
 61	struct aead_request *req = pcrypt_request_ctx(preq);
 62
 63	aead_request_complete(req->base.data, padata->info);
 64}
 65
 66static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
 67{
 68	struct aead_request *req = areq->data;
 69	struct pcrypt_request *preq = aead_request_ctx(req);
 70	struct padata_priv *padata = pcrypt_request_padata(preq);
 71
 72	padata->info = err;
 73
 74	padata_do_serial(padata);
 75}
 76
 77static void pcrypt_aead_enc(struct padata_priv *padata)
 78{
 79	struct pcrypt_request *preq = pcrypt_padata_request(padata);
 80	struct aead_request *req = pcrypt_request_ctx(preq);
 81	int ret;
 82
 83	ret = crypto_aead_encrypt(req);
 84
 85	if (ret == -EINPROGRESS)
 86		return;
 87
 88	padata->info = ret;
 89	padata_do_serial(padata);
 90}
 91
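/*
 * As in the older code, the request for the child AEAD is built in the
 * pcrypt request context; the difference is that the work is dispatched
 * through the instance's own padata shell (psenc), and padata_do_parallel()
 * now takes &ctx->cb_cpu itself to pick a valid callback CPU.
 */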
 92static int pcrypt_aead_encrypt(struct aead_request *req)
 93{
 94	int err;
 95	struct pcrypt_request *preq = aead_request_ctx(req);
 96	struct aead_request *creq = pcrypt_request_ctx(preq);
 97	struct padata_priv *padata = pcrypt_request_padata(preq);
 98	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 99	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
100	u32 flags = aead_request_flags(req);
101	struct pcrypt_instance_ctx *ictx;
102
103	ictx = pcrypt_tfm_ictx(aead);
104
105	memset(padata, 0, sizeof(struct padata_priv));
106
107	padata->parallel = pcrypt_aead_enc;
108	padata->serial = pcrypt_aead_serial;
109
110	aead_request_set_tfm(creq, ctx->child);
111	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
112				  pcrypt_aead_done, req);
113	aead_request_set_crypt(creq, req->src, req->dst,
114			       req->cryptlen, req->iv);
115	aead_request_set_ad(creq, req->assoclen);
116
117	err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
118	if (!err)
119		return -EINPROGRESS;
120
121	return err;
122}
123
124static void pcrypt_aead_dec(struct padata_priv *padata)
125{
126	struct pcrypt_request *preq = pcrypt_padata_request(padata);
127	struct aead_request *req = pcrypt_request_ctx(preq);
128	int ret;
129
130	ret = crypto_aead_decrypt(req);
131
132	if (ret == -EINPROGRESS)
133		return;
134
135	padata->info = ret;
136	padata_do_serial(padata);
137}
138
139static int pcrypt_aead_decrypt(struct aead_request *req)
140{
141	int err;
142	struct pcrypt_request *preq = aead_request_ctx(req);
143	struct aead_request *creq = pcrypt_request_ctx(preq);
144	struct padata_priv *padata = pcrypt_request_padata(preq);
145	struct crypto_aead *aead = crypto_aead_reqtfm(req);
146	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
147	u32 flags = aead_request_flags(req);
148	struct pcrypt_instance_ctx *ictx;
149
150	ictx = pcrypt_tfm_ictx(aead);
151
152	memset(padata, 0, sizeof(struct padata_priv));
153
154	padata->parallel = pcrypt_aead_dec;
155	padata->serial = pcrypt_aead_serial;
156
157	aead_request_set_tfm(creq, ctx->child);
158	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
159				  pcrypt_aead_done, req);
160	aead_request_set_crypt(creq, req->src, req->dst,
161			       req->cryptlen, req->iv);
162	aead_request_set_ad(creq, req->assoclen);
163
164	err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
165	if (!err)
166		return -EINPROGRESS;
167
168	return err;
169}
170
171static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
172{
173	int cpu, cpu_index;
174	struct aead_instance *inst = aead_alg_instance(tfm);
175	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
176	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
177	struct crypto_aead *cipher;
178
179	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
180		    cpumask_weight(cpu_online_mask);
181
182	ctx->cb_cpu = cpumask_first(cpu_online_mask);
183	for (cpu = 0; cpu < cpu_index; cpu++)
184		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
185
186	cipher = crypto_spawn_aead(&ictx->spawn);
187
188	if (IS_ERR(cipher))
189		return PTR_ERR(cipher);
190
191	ctx->child = cipher;
192	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
193				     sizeof(struct aead_request) +
194				     crypto_aead_reqsize(cipher));
195
196	return 0;
197}
198
199static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
200{
201	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
202
203	crypto_free_aead(ctx->child);
204}
205
206static void pcrypt_free(struct aead_instance *inst)
207{
208	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
209
210	crypto_drop_aead(&ctx->spawn);
211	padata_free_shell(ctx->psdec);
212	padata_free_shell(ctx->psenc);
213	kfree(inst);
214}
215
216static int pcrypt_init_instance(struct crypto_instance *inst,
217				struct crypto_alg *alg)
218{
219	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
220		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
221		return -ENAMETOOLONG;
222
223	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
224
225	inst->alg.cra_priority = alg->cra_priority + 100;
226	inst->alg.cra_blocksize = alg->cra_blocksize;
227	inst->alg.cra_alignmask = alg->cra_alignmask;
228
229	return 0;
230}
231
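/*
 * Template instantiation: allocate the aead_instance together with its
 * context, attach per-instance padata shells for the shared pencrypt and
 * pdecrypt instances, grab the underlying AEAD by name (honouring the
 * inherited algorithm flags) and register the wrapper; pcrypt_free()
 * unwinds everything on failure.
 */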
232static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
233			      struct crypto_attr_type *algt)
234{
235	struct pcrypt_instance_ctx *ctx;
236	struct aead_instance *inst;
237	struct aead_alg *alg;
238	u32 mask = crypto_algt_inherited_mask(algt);
239	int err;
240
241	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
242	if (!inst)
243		return -ENOMEM;
244
245	err = -ENOMEM;
246
247	ctx = aead_instance_ctx(inst);
248	ctx->psenc = padata_alloc_shell(pencrypt);
249	if (!ctx->psenc)
250		goto err_free_inst;
251
252	ctx->psdec = padata_alloc_shell(pdecrypt);
253	if (!ctx->psdec)
254		goto err_free_inst;
255
256	err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
257			       crypto_attr_alg_name(tb[1]), 0, mask);
258	if (err)
259		goto err_free_inst;
260
261	alg = crypto_spawn_aead_alg(&ctx->spawn);
262	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
263	if (err)
264		goto err_free_inst;
265
266	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC;
267
268	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
269	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
270
271	inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
272
273	inst->alg.init = pcrypt_aead_init_tfm;
274	inst->alg.exit = pcrypt_aead_exit_tfm;
275
276	inst->alg.setkey = pcrypt_aead_setkey;
277	inst->alg.setauthsize = pcrypt_aead_setauthsize;
278	inst->alg.encrypt = pcrypt_aead_encrypt;
279	inst->alg.decrypt = pcrypt_aead_decrypt;
280
281	inst->free = pcrypt_free;
282
283	err = aead_register_instance(tmpl, inst);
284	if (err) {
285err_free_inst:
286		pcrypt_free(inst);
287	}
288	return err;
289}
290
291static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
292{
293	struct crypto_attr_type *algt;
294
295	algt = crypto_get_attr_type(tb);
296	if (IS_ERR(algt))
297		return PTR_ERR(algt);
298
299	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
300	case CRYPTO_ALG_TYPE_AEAD:
301		return pcrypt_create_aead(tmpl, tb, algt);
302	}
303
304	return -EINVAL;
305}
306
307static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
308{
309	int ret;
310
311	pinst->kobj.kset = pcrypt_kset;
312	ret = kobject_add(&pinst->kobj, NULL, "%s", name);
313	if (!ret)
314		kobject_uevent(&pinst->kobj, KOBJ_ADD);
315
316	return ret;
317}
318
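/*
 * Compared with the old code there is no private workqueue and no cpumask
 * notifier: padata_alloc() does the heavy lifting, and the instance is
 * only added to the pcrypt kset so that its cpumasks can be adjusted from
 * sysfs.
 */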
319static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
320{
321	int ret = -ENOMEM;
322
323	*pinst = padata_alloc(name);
324	if (!*pinst)
325		return ret;
326
327	ret = pcrypt_sysfs_add(*pinst, name);
328	if (ret)
329		padata_free(*pinst);
330
331	return ret;
332}
333
334static struct crypto_template pcrypt_tmpl = {
335	.name = "pcrypt",
336	.create = pcrypt_create,
337	.module = THIS_MODULE,
338};
339
340static int __init pcrypt_init(void)
341{
342	int err = -ENOMEM;
343
344	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
345	if (!pcrypt_kset)
346		goto err;
347
348	err = pcrypt_init_padata(&pencrypt, "pencrypt");
349	if (err)
350		goto err_unreg_kset;
351
352	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
353	if (err)
354		goto err_deinit_pencrypt;
355
356	return crypto_register_template(&pcrypt_tmpl);
357
358err_deinit_pencrypt:
359	padata_free(pencrypt);
360err_unreg_kset:
361	kset_unregister(pcrypt_kset);
362err:
363	return err;
364}
365
366static void __exit pcrypt_exit(void)
367{
368	crypto_unregister_template(&pcrypt_tmpl);
369
370	padata_free(pencrypt);
371	padata_free(pdecrypt);
372
373	kset_unregister(pcrypt_kset);
374}
375
376subsys_initcall(pcrypt_init);
377module_exit(pcrypt_exit);
378
379MODULE_LICENSE("GPL");
380MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
381MODULE_DESCRIPTION("Parallel crypto wrapper");
382MODULE_ALIAS_CRYPTO("pcrypt");
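
Usage is unchanged from the v3.15 code: callers instantiate the template by
name, e.g. crypto_alloc_aead("pcrypt(gcm(aes))", 0, 0), and the
MODULE_ALIAS_CRYPTO("pcrypt") line lets such a request auto-load the module.
The parallel and serial cpumasks of the shared pencrypt/pdecrypt instances
remain tunable from sysfs under /sys/kernel/pcrypt/ through the padata
attributes.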