v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>

static struct padata_instance *pencrypt;
static struct padata_instance *pdecrypt;
static struct kset           *pcrypt_kset;

struct pcrypt_instance_ctx {
	struct crypto_aead_spawn spawn;
	struct padata_shell *psenc;
	struct padata_shell *psdec;
	atomic_t tfm_count;
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;
	unsigned int cb_cpu;
};
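
/*
 * One instance context is shared by every transform spawned from a
 * given "pcrypt(...)" template instance: it owns the spawn of the
 * wrapped AEAD and one padata shell per direction, and tfm_count is
 * used to spread callback CPUs across transforms. Each transform
 * additionally tracks its own callback CPU in struct pcrypt_aead_ctx.
 */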

static inline struct pcrypt_instance_ctx *pcrypt_tfm_ictx(
	struct crypto_aead *tfm)
{
	return aead_instance_ctx(aead_alg_instance(tfm));
}

static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}

static void pcrypt_aead_done(void *data, int err)
{
	struct aead_request *req = data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;

	padata_do_serial(padata);
}

static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);
	int ret;

	ret = crypto_aead_encrypt(req);

	if (ret == -EINPROGRESS)
		return;

	padata->info = ret;
	padata_do_serial(padata);
}

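/*
 * Queue a request on the encryption padata instance. Returning
 * -EINPROGRESS means padata accepted the request for parallel
 * processing; -EBUSY from padata_do_parallel() falls back to running
 * the inner transform synchronously. The decrypt path below mirrors
 * this.
 */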
static int pcrypt_aead_encrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);
	struct pcrypt_instance_ctx *ictx;

	ictx = pcrypt_tfm_ictx(aead);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
	if (!err)
		return -EINPROGRESS;
	if (err == -EBUSY) {
		/* try non-parallel mode */
		return crypto_aead_encrypt(creq);
	}

	return err;
}

static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);
	int ret;

	ret = crypto_aead_decrypt(req);

	if (ret == -EINPROGRESS)
		return;

	padata->info = ret;
	padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);
	struct pcrypt_instance_ctx *ictx;

	ictx = pcrypt_tfm_ictx(aead);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
	if (!err)
		return -EINPROGRESS;
	if (err == -EBUSY) {
		/* try non-parallel mode */
		return crypto_aead_decrypt(creq);
	}

	return err;
}

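/*
 * Pick this transform's callback CPU by round-robining tfm_count over
 * the online CPUs, so completions from different transforms do not
 * all serialize on a single CPU.
 */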
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
	int cpu, cpu_index;
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(&ictx->spawn);

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
				     sizeof(struct aead_request) +
				     crypto_aead_reqsize(cipher));

	return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void pcrypt_free(struct aead_instance *inst)
{
	struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);

	crypto_drop_aead(&ctx->spawn);
	padata_free_shell(ctx->psdec);
	padata_free_shell(ctx->psenc);
	kfree(inst);
}

static int pcrypt_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

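/*
 * Build a "pcrypt(...)" instance around the wrapped AEAD: allocate
 * one padata shell per direction, grab the inner algorithm, and
 * register the wrapper as an async AEAD that outranks the wrapped
 * algorithm by a priority boost of 100.
 */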
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
			      struct crypto_attr_type *algt)
{
	struct pcrypt_instance_ctx *ctx;
	struct aead_instance *inst;
	struct aead_alg *alg;
	u32 mask = crypto_algt_inherited_mask(algt);
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	err = -ENOMEM;

	ctx = aead_instance_ctx(inst);
	ctx->psenc = padata_alloc_shell(pencrypt);
	if (!ctx->psenc)
		goto err_free_inst;

	ctx->psdec = padata_alloc_shell(pdecrypt);
	if (!ctx->psdec)
		goto err_free_inst;

	err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
			       crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->spawn);
	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.init = pcrypt_aead_init_tfm;
	inst->alg.exit = pcrypt_aead_exit_tfm;

	inst->alg.setkey = pcrypt_aead_setkey;
	inst->alg.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.encrypt = pcrypt_aead_encrypt;
	inst->alg.decrypt = pcrypt_aead_decrypt;

	inst->free = pcrypt_free;

	err = aead_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		pcrypt_free(inst);
	}
	return err;
}

static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		return pcrypt_create_aead(tmpl, tb, algt);
	}

	return -EINVAL;
}

static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
	int ret;

	pinst->kobj.kset = pcrypt_kset;
	ret = kobject_add(&pinst->kobj, NULL, "%s", name);
	if (!ret)
		kobject_uevent(&pinst->kobj, KOBJ_ADD);

	return ret;
}

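/*
 * Allocate one padata instance and expose it under
 * /sys/kernel/pcrypt/<name>, where padata's parallel and serial
 * cpumasks can be tuned from user space.
 */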
static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
{
	int ret = -ENOMEM;

	*pinst = padata_alloc(name);
	if (!*pinst)
		return ret;

	ret = pcrypt_sysfs_add(*pinst, name);
	if (ret)
		padata_free(*pinst);

	return ret;
}

static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.create = pcrypt_create,
	.module = THIS_MODULE,
};

static int __init pcrypt_init(void)
{
	int err = -ENOMEM;

	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
	if (!pcrypt_kset)
		goto err;

	err = pcrypt_init_padata(&pencrypt, "pencrypt");
	if (err)
		goto err_unreg_kset;

	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
	if (err)
		goto err_deinit_pencrypt;

	return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
	padata_free(pencrypt);
err_unreg_kset:
	kset_unregister(pcrypt_kset);
err:
	return err;
}

static void __exit pcrypt_exit(void)
{
	crypto_unregister_template(&pcrypt_tmpl);

	padata_free(pencrypt);
	padata_free(pdecrypt);

	kset_unregister(pcrypt_kset);
}

subsys_initcall(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
MODULE_ALIAS_CRYPTO("pcrypt");
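
For context, here is a minimal, illustrative sketch (not part of the file above) of how kernel code might drive this wrapper, assuming the inner "gcm(aes)" algorithm is available and using the modern crypto_wait_req() helper. The function pcrypt_demo and its parameters are hypothetical names chosen for the example; the API calls are the standard kernel AEAD interface.

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Illustrative only: encrypt len bytes of buf in place through
 * "pcrypt(gcm(aes))". buf must have room for the 16-byte tag after
 * the data, key is 32 bytes (AES-256) and iv is 12 bytes (GCM).
 * crypto_wait_req() turns the asynchronous -EINPROGRESS completion
 * back into a synchronous return for the caller.
 */
static int pcrypt_demo(u8 *buf, unsigned int len, const u8 *key, u8 *iv)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("pcrypt(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, 32);
	if (err)
		goto out_free_tfm;

	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_crypt(req, &sg, &sg, len, iv);
	aead_request_set_ad(req, 0);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}
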
v4.6
 
/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>

struct padata_pcrypt {
	struct padata_instance *pinst;
	struct workqueue_struct *wq;

	/*
	 * Cpumask for callback CPUs. It should be equal to the serial
	 * cpumask of the corresponding padata instance, so it is updated
	 * when padata notifies us about a serial cpumask change.
	 *
	 * cb_cpumask is protected by RCU. This fact prevents us from
	 * using cpumask_var_t directly because the actual type of
	 * cpumask_var_t depends on kernel configuration (particularly on
	 * the CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration
	 * cpumask_var_t may be either a pointer to the struct cpumask
	 * or a variable allocated on the stack. Thus we cannot safely use
	 * cpumask_var_t with RCU operations such as rcu_assign_pointer or
	 * rcu_dereference. So cpumask_var_t is wrapped with struct
	 * pcrypt_cpumask, which makes it possible to use it with RCU.
	 */
	struct pcrypt_cpumask {
		cpumask_var_t mask;
	} *cb_cpumask;
	struct notifier_block nblock;
};

static struct padata_pcrypt pencrypt;
static struct padata_pcrypt pdecrypt;
static struct kset           *pcrypt_kset;

struct pcrypt_instance_ctx {
	struct crypto_aead_spawn spawn;
	atomic_t tfm_count;
};

struct pcrypt_aead_ctx {
	struct crypto_aead *child;
	unsigned int cb_cpu;
};

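/*
 * Pick the callback CPU for this request from the RCU-protected
 * callback cpumask, then hand the request to padata. If the cached
 * *cb_cpu is no longer in the mask, rotate to a CPU that is.
 */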
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
			      struct padata_pcrypt *pcrypt)
{
	unsigned int cpu_index, cpu, i;
	struct pcrypt_cpumask *cpumask;

	cpu = *cb_cpu;

	rcu_read_lock_bh();
	cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
	if (cpumask_test_cpu(cpu, cpumask->mask))
		goto out;

	if (!cpumask_weight(cpumask->mask))
		goto out;

	cpu_index = cpu % cpumask_weight(cpumask->mask);

	cpu = cpumask_first(cpumask->mask);
	for (i = 0; i < cpu_index; i++)
		cpu = cpumask_next(cpu, cpumask->mask);

	*cb_cpu = cpu;

out:
	rcu_read_unlock_bh();
	return padata_do_parallel(pcrypt->pinst, padata, cpu);
}

static int pcrypt_aead_setkey(struct crypto_aead *parent,
			      const u8 *key, unsigned int keylen)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
				   unsigned int authsize)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

static void pcrypt_aead_serial(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	aead_request_complete(req->base.data, padata->info);
}

static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
	struct aead_request *req = areq->data;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct padata_priv *padata = pcrypt_request_padata(preq);

	padata->info = err;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

	padata_do_serial(padata);
}

static void pcrypt_aead_enc(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_encrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_encrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_enc;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

static void pcrypt_aead_dec(struct padata_priv *padata)
{
	struct pcrypt_request *preq = pcrypt_padata_request(padata);
	struct aead_request *req = pcrypt_request_ctx(preq);

	padata->info = crypto_aead_decrypt(req);

	if (padata->info == -EINPROGRESS)
		return;

	padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
	int err;
	struct pcrypt_request *preq = aead_request_ctx(req);
	struct aead_request *creq = pcrypt_request_ctx(preq);
	struct padata_priv *padata = pcrypt_request_padata(preq);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
	u32 flags = aead_request_flags(req);

	memset(padata, 0, sizeof(struct padata_priv));

	padata->parallel = pcrypt_aead_dec;
	padata->serial = pcrypt_aead_serial;

	aead_request_set_tfm(creq, ctx->child);
	aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
				  pcrypt_aead_done, req);
	aead_request_set_crypt(creq, req->src, req->dst,
			       req->cryptlen, req->iv);
	aead_request_set_ad(creq, req->assoclen);

	err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
	if (!err)
		return -EINPROGRESS;

	return err;
}

static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
	int cpu, cpu_index;
	struct aead_instance *inst = aead_alg_instance(tfm);
	struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_aead *cipher;

	cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
		    cpumask_weight(cpu_online_mask);

	ctx->cb_cpu = cpumask_first(cpu_online_mask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);

	cipher = crypto_spawn_aead(&ictx->spawn);

	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
				     sizeof(struct aead_request) +
				     crypto_aead_reqsize(cipher));

	return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
	struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static int pcrypt_init_instance(struct crypto_instance *inst,
				struct crypto_alg *alg)
{
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		return -ENAMETOOLONG;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 100;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

	return 0;
}

static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
			      u32 type, u32 mask)
{
	struct pcrypt_instance_ctx *ctx;
	struct crypto_attr_type *algt;
	struct aead_instance *inst;
	struct aead_alg *alg;
	const char *name;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	name = crypto_attr_alg_name(tb[1]);
	if (IS_ERR(name))
		return PTR_ERR(name);

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	ctx = aead_instance_ctx(inst);
	crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));

	err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
	if (err)
		goto out_free_inst;

	alg = crypto_spawn_aead_alg(&ctx->spawn);
	err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
	if (err)
		goto out_drop_aead;

	inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
	inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

	inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

	inst->alg.init = pcrypt_aead_init_tfm;
	inst->alg.exit = pcrypt_aead_exit_tfm;

	inst->alg.setkey = pcrypt_aead_setkey;
	inst->alg.setauthsize = pcrypt_aead_setauthsize;
	inst->alg.encrypt = pcrypt_aead_encrypt;
	inst->alg.decrypt = pcrypt_aead_decrypt;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto out_drop_aead;

out:
	return err;

out_drop_aead:
	crypto_drop_aead(&ctx->spawn);
out_free_inst:
	kfree(inst);
	goto out;
}

static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
	}

	return -EINVAL;
}

static void pcrypt_free(struct crypto_instance *inst)
{
	struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_aead(&ctx->spawn);
	kfree(inst);
}

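/*
 * padata notifies us when its serial cpumask changes; publish a fresh
 * copy for pcrypt_do_parallel() via rcu_assign_pointer() and free the
 * old mask once all readers are done with it.
 */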
static int pcrypt_cpumask_change_notify(struct notifier_block *self,
					unsigned long val, void *data)
{
	struct padata_pcrypt *pcrypt;
	struct pcrypt_cpumask *new_mask, *old_mask;
	struct padata_cpumask *cpumask = (struct padata_cpumask *)data;

	if (!(val & PADATA_CPU_SERIAL))
		return 0;

	pcrypt = container_of(self, struct padata_pcrypt, nblock);
	new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
	if (!new_mask)
		return -ENOMEM;
	if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
		kfree(new_mask);
		return -ENOMEM;
	}

	old_mask = pcrypt->cb_cpumask;

	cpumask_copy(new_mask->mask, cpumask->cbcpu);
	rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
	synchronize_rcu_bh();

	free_cpumask_var(old_mask->mask);
	kfree(old_mask);
	return 0;
}

static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
	int ret;

	pinst->kobj.kset = pcrypt_kset;
	ret = kobject_add(&pinst->kobj, NULL, name);
	if (!ret)
		kobject_uevent(&pinst->kobj, KOBJ_ADD);

	return ret;
}

static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
			      const char *name)
{
	int ret = -ENOMEM;
	struct pcrypt_cpumask *mask;

	get_online_cpus();

	pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
				     1, name);
	if (!pcrypt->wq)
		goto err;

	pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
	if (!pcrypt->pinst)
		goto err_destroy_workqueue;

	mask = kmalloc(sizeof(*mask), GFP_KERNEL);
	if (!mask)
		goto err_free_padata;
	if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
		kfree(mask);
		goto err_free_padata;
	}

	cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
	rcu_assign_pointer(pcrypt->cb_cpumask, mask);

	pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
	ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	if (ret)
		goto err_free_cpumask;

	ret = pcrypt_sysfs_add(pcrypt->pinst, name);
	if (ret)
		goto err_unregister_notifier;

	put_online_cpus();

	return ret;

err_unregister_notifier:
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
err_free_cpumask:
	free_cpumask_var(mask->mask);
	kfree(mask);
err_free_padata:
	padata_free(pcrypt->pinst);
err_destroy_workqueue:
	destroy_workqueue(pcrypt->wq);
err:
	put_online_cpus();

	return ret;
}

static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
{
	free_cpumask_var(pcrypt->cb_cpumask->mask);
	kfree(pcrypt->cb_cpumask);

	padata_stop(pcrypt->pinst);
	padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
	destroy_workqueue(pcrypt->wq);
	padata_free(pcrypt->pinst);
}

static struct crypto_template pcrypt_tmpl = {
	.name = "pcrypt",
	.create = pcrypt_create,
	.free = pcrypt_free,
	.module = THIS_MODULE,
};

static int __init pcrypt_init(void)
{
	int err = -ENOMEM;

	pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
	if (!pcrypt_kset)
		goto err;

	err = pcrypt_init_padata(&pencrypt, "pencrypt");
	if (err)
		goto err_unreg_kset;

	err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
	if (err)
		goto err_deinit_pencrypt;

	padata_start(pencrypt.pinst);
	padata_start(pdecrypt.pinst);

	return crypto_register_template(&pcrypt_tmpl);

err_deinit_pencrypt:
	pcrypt_fini_padata(&pencrypt);
err_unreg_kset:
	kset_unregister(pcrypt_kset);
err:
	return err;
}

static void __exit pcrypt_exit(void)
{
	pcrypt_fini_padata(&pencrypt);
	pcrypt_fini_padata(&pdecrypt);

	kset_unregister(pcrypt_kset);
	crypto_unregister_template(&pcrypt_tmpl);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
MODULE_ALIAS_CRYPTO("pcrypt");