crypto/rsa-pkcs1pad.c, v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * RSA padding templates.
  4 *
  5 * Copyright (c) 2015  Intel Corporation
  6 */
  7
  8#include <crypto/algapi.h>
  9#include <crypto/akcipher.h>
 10#include <crypto/internal/akcipher.h>
 11#include <crypto/internal/rsa.h>
 12#include <linux/err.h>
 13#include <linux/init.h>
 14#include <linux/kernel.h>
 15#include <linux/module.h>
 16#include <linux/random.h>
 17
 18/*
 19 * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
 20 */
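/*
 * For example, the sha256 entry below is the DER prefix
 *
 *   30 31 30 0d 06 09 60 86 48 01 65 03 04 02 01 05 00 04 20
 *
 * i.e. a SEQUENCE carrying the hash AlgorithmIdentifier and the OCTET
 * STRING header; appending the 32 (0x20) hash bytes yields the complete
 * DigestInfo that pkcs1pad_sign() embeds in the padded block.
 */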
 21static const u8 rsa_digest_info_md5[] = {
 22	0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
 23	0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
 24	0x05, 0x00, 0x04, 0x10
 25};
 26
 27static const u8 rsa_digest_info_sha1[] = {
 28	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
 29	0x2b, 0x0e, 0x03, 0x02, 0x1a,
 30	0x05, 0x00, 0x04, 0x14
 31};
 32
 33static const u8 rsa_digest_info_rmd160[] = {
 34	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
 35	0x2b, 0x24, 0x03, 0x02, 0x01,
 36	0x05, 0x00, 0x04, 0x14
 37};
 38
 39static const u8 rsa_digest_info_sha224[] = {
 40	0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
 41	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
 42	0x05, 0x00, 0x04, 0x1c
 43};
 44
 45static const u8 rsa_digest_info_sha256[] = {
 46	0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
 47	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
 48	0x05, 0x00, 0x04, 0x20
 49};
 50
 51static const u8 rsa_digest_info_sha384[] = {
 52	0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
 53	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
 54	0x05, 0x00, 0x04, 0x30
 55};
 56
 57static const u8 rsa_digest_info_sha512[] = {
 58	0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
 59	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
 60	0x05, 0x00, 0x04, 0x40
 61};
 62
 63static const struct rsa_asn1_template {
 64	const char	*name;
 65	const u8	*data;
 66	size_t		size;
 67} rsa_asn1_templates[] = {
 68#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
 69	_(md5),
 70	_(sha1),
 71	_(rmd160),
 72	_(sha256),
 73	_(sha384),
 74	_(sha512),
 75	_(sha224),
 76	{ NULL }
 77#undef _
 78};
 79
 80static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
 81{
 82	const struct rsa_asn1_template *p;
 83
 84	for (p = rsa_asn1_templates; p->name; p++)
 85		if (strcmp(name, p->name) == 0)
 86			return p;
 87	return NULL;
 88}
 89
 90struct pkcs1pad_ctx {
 91	struct crypto_akcipher *child;
 92	unsigned int key_size;
 93};
 94
 95struct pkcs1pad_inst_ctx {
 96	struct crypto_akcipher_spawn spawn;
 97	const struct rsa_asn1_template *digest_info;
 98};
 99
100struct pkcs1pad_request {
101	struct scatterlist in_sg[2], out_sg[1];
102	uint8_t *in_buf, *out_buf;
103	struct akcipher_request child_req;
104};
105
106static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
107		unsigned int keylen)
108{
109	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
110	int err;
111
112	ctx->key_size = 0;
113
114	err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
115	if (err)
116		return err;
117
118	/* Find out new modulus size from rsa implementation */
119	err = crypto_akcipher_maxsize(ctx->child);
120	if (err > PAGE_SIZE)
121		return -ENOTSUPP;
122
123	ctx->key_size = err;
124	return 0;
125}
126
127static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
128		unsigned int keylen)
129{
130	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
131	int err;
132
133	ctx->key_size = 0;
134
135	err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
136	if (err)
137		return err;
138
139	/* Find out new modulus size from rsa implementation */
140	err = crypto_akcipher_maxsize(ctx->child);
141	if (err > PAGE_SIZE)
142		return -ENOTSUPP;
143
144	ctx->key_size = err;
145	return 0;
146}
147
148static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
149{
150	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
151
152	/*
153	 * The maximum destination buffer size for the encrypt/sign operations
154	 * will be the same as for RSA, even though it's smaller for
155	 * decrypt/verify.
156	 */
157
158	return ctx->key_size;
159}
160
161static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
162		struct scatterlist *next)
163{
164	int nsegs = next ? 2 : 1;
165
166	sg_init_table(sg, nsegs);
167	sg_set_buf(sg, buf, len);
168
169	if (next)
170		sg_chain(sg, nsegs, next);
171}
172
173static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
174{
175	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
176	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
177	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
178	unsigned int pad_len;
179	unsigned int len;
180	u8 *out_buf;
181
182	if (err)
183		goto out;
184
185	len = req_ctx->child_req.dst_len;
186	pad_len = ctx->key_size - len;
187
188	/* Four billion to one */
189	if (likely(!pad_len))
190		goto out;
191
192	out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
193	err = -ENOMEM;
194	if (!out_buf)
195		goto out;
196
197	sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
198			  out_buf + pad_len, len);
199	sg_copy_from_buffer(req->dst,
200			    sg_nents_for_len(req->dst, ctx->key_size),
201			    out_buf, ctx->key_size);
202	kzfree(out_buf);
203
204out:
205	req->dst_len = ctx->key_size;
206
207	kfree(req_ctx->in_buf);
208
209	return err;
210}
211
212static void pkcs1pad_encrypt_sign_complete_cb(
213		struct crypto_async_request *child_async_req, int err)
214{
215	struct akcipher_request *req = child_async_req->data;
216	struct crypto_async_request async_req;
217
218	if (err == -EINPROGRESS)
219		return;
220
221	async_req.data = req->base.data;
222	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
223	async_req.flags = child_async_req->flags;
224	req->base.complete(&async_req,
225			pkcs1pad_encrypt_sign_complete(req, err));
226}
227
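/*
 * pkcs1pad_encrypt() below builds the EME-PKCS1-v1_5 block
 *
 *   0x00 || 0x02 || PS || 0x00 || M
 *
 * where PS is at least eight nonzero random octets (hence the
 * key_size - 11 length check).  Only key_size - 1 bytes are handed to
 * the child RSA transform, so the leading 0x00 is implicit: in_buf
 * starts with the 0x02 marker and is chained to req->src, which
 * supplies M.
 */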
228static int pkcs1pad_encrypt(struct akcipher_request *req)
229{
230	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
231	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
232	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
233	int err;
234	unsigned int i, ps_end;
235
236	if (!ctx->key_size)
237		return -EINVAL;
238
239	if (req->src_len > ctx->key_size - 11)
240		return -EOVERFLOW;
241
242	if (req->dst_len < ctx->key_size) {
243		req->dst_len = ctx->key_size;
244		return -EOVERFLOW;
245	}
246
247	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
248				  GFP_KERNEL);
249	if (!req_ctx->in_buf)
250		return -ENOMEM;
251
252	ps_end = ctx->key_size - req->src_len - 2;
253	req_ctx->in_buf[0] = 0x02;
254	for (i = 1; i < ps_end; i++)
255		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
256	req_ctx->in_buf[ps_end] = 0x00;
257
258	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
259			ctx->key_size - 1 - req->src_len, req->src);
260
261	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
262	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
263			pkcs1pad_encrypt_sign_complete_cb, req);
264
265	/* Reuse output buffer */
266	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
267				   req->dst, ctx->key_size - 1, req->dst_len);
268
269	err = crypto_akcipher_encrypt(&req_ctx->child_req);
270	if (err != -EINPROGRESS && err != -EBUSY)
271		return pkcs1pad_encrypt_sign_complete(req, err);
272
273	return err;
274}
275
276static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
277{
278	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
279	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
280	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
281	unsigned int dst_len;
282	unsigned int pos;
283	u8 *out_buf;
284
285	if (err)
286		goto done;
287
288	err = -EINVAL;
289	dst_len = req_ctx->child_req.dst_len;
290	if (dst_len < ctx->key_size - 1)
291		goto done;
292
293	out_buf = req_ctx->out_buf;
294	if (dst_len == ctx->key_size) {
295		if (out_buf[0] != 0x00)
296			/* Decrypted value had no leading 0 byte */
297			goto done;
298
299		dst_len--;
300		out_buf++;
301	}
302
303	if (out_buf[0] != 0x02)
304		goto done;
305
306	for (pos = 1; pos < dst_len; pos++)
307		if (out_buf[pos] == 0x00)
308			break;
309	if (pos < 9 || pos == dst_len)
310		goto done;
311	pos++;
312
313	err = 0;
314
315	if (req->dst_len < dst_len - pos)
316		err = -EOVERFLOW;
317	req->dst_len = dst_len - pos;
318
319	if (!err)
320		sg_copy_from_buffer(req->dst,
321				sg_nents_for_len(req->dst, req->dst_len),
322				out_buf + pos, req->dst_len);
323
324done:
325	kzfree(req_ctx->out_buf);
326
327	return err;
328}
329
330static void pkcs1pad_decrypt_complete_cb(
331		struct crypto_async_request *child_async_req, int err)
332{
333	struct akcipher_request *req = child_async_req->data;
334	struct crypto_async_request async_req;
335
336	if (err == -EINPROGRESS)
337		return;
338
339	async_req.data = req->base.data;
340	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
341	async_req.flags = child_async_req->flags;
342	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
343}
344
345static int pkcs1pad_decrypt(struct akcipher_request *req)
346{
347	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
348	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
349	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
350	int err;
351
352	if (!ctx->key_size || req->src_len != ctx->key_size)
353		return -EINVAL;
354
355	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
356	if (!req_ctx->out_buf)
357		return -ENOMEM;
358
359	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
360			    ctx->key_size, NULL);
361
362	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
363	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
364			pkcs1pad_decrypt_complete_cb, req);
365
366	/* Reuse input buffer, output to a new buffer */
367	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
368				   req_ctx->out_sg, req->src_len,
369				   ctx->key_size);
370
371	err = crypto_akcipher_decrypt(&req_ctx->child_req);
372	if (err != -EINPROGRESS && err != -EBUSY)
373		return pkcs1pad_decrypt_complete(req, err);
374
375	return err;
376}
377
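/*
 * pkcs1pad_sign() below builds the EMSA-PKCS1-v1_5 block
 *
 *   0x00 || 0x01 || 0xff .. 0xff || 0x00 || DigestInfo || H
 *
 * where H is the caller-supplied hash in req->src and DigestInfo is the
 * instance's digest_info template (absent for a plain "pkcs1pad(rsa)"
 * instance).  As in pkcs1pad_encrypt(), the leading 0x00 is implicit,
 * and the raw private-key operation is performed through the child's
 * decrypt path.
 */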
378static int pkcs1pad_sign(struct akcipher_request *req)
379{
380	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
381	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
382	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
383	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
384	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
385	const struct rsa_asn1_template *digest_info = ictx->digest_info;
386	int err;
387	unsigned int ps_end, digest_size = 0;
388
389	if (!ctx->key_size)
390		return -EINVAL;
391
392	if (digest_info)
393		digest_size = digest_info->size;
394
395	if (req->src_len + digest_size > ctx->key_size - 11)
396		return -EOVERFLOW;
397
398	if (req->dst_len < ctx->key_size) {
399		req->dst_len = ctx->key_size;
400		return -EOVERFLOW;
401	}
402
403	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
404				  GFP_KERNEL);
405	if (!req_ctx->in_buf)
406		return -ENOMEM;
407
408	ps_end = ctx->key_size - digest_size - req->src_len - 2;
409	req_ctx->in_buf[0] = 0x01;
410	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
411	req_ctx->in_buf[ps_end] = 0x00;
412
413	if (digest_info)
414		memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
415		       digest_info->size);
416
417	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
418			ctx->key_size - 1 - req->src_len, req->src);
419
420	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
421	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
422			pkcs1pad_encrypt_sign_complete_cb, req);
423
424	/* Reuse output buffer */
425	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
426				   req->dst, ctx->key_size - 1, req->dst_len);
427
428	err = crypto_akcipher_decrypt(&req_ctx->child_req);
429	if (err != -EINPROGRESS && err != -EBUSY)
430		return pkcs1pad_encrypt_sign_complete(req, err);
431
432	return err;
433}
434
435static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
436{
437	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
438	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
439	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
440	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
441	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
442	const struct rsa_asn1_template *digest_info = ictx->digest_info;
443	unsigned int dst_len;
444	unsigned int pos;
445	u8 *out_buf;
446
447	if (err)
448		goto done;
449
450	err = -EINVAL;
451	dst_len = req_ctx->child_req.dst_len;
452	if (dst_len < ctx->key_size - 1)
453		goto done;
454
455	out_buf = req_ctx->out_buf;
456	if (dst_len == ctx->key_size) {
457		if (out_buf[0] != 0x00)
458			/* Decrypted value had no leading 0 byte */
459			goto done;
460
461		dst_len--;
462		out_buf++;
463	}
464
465	err = -EBADMSG;
466	if (out_buf[0] != 0x01)
467		goto done;
468
469	for (pos = 1; pos < dst_len; pos++)
470		if (out_buf[pos] != 0xff)
471			break;
472
473	if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
474		goto done;
475	pos++;
476
477	if (digest_info) {
478		if (crypto_memneq(out_buf + pos, digest_info->data,
479				  digest_info->size))
480			goto done;
481
482		pos += digest_info->size;
483	}
484
485	err = 0;
486
487	if (req->dst_len != dst_len - pos) {
488		err = -EKEYREJECTED;
489		req->dst_len = dst_len - pos;
490		goto done;
491	}
492	/* Extract appended digest. */
493	sg_pcopy_to_buffer(req->src,
494			   sg_nents_for_len(req->src,
495					    req->src_len + req->dst_len),
496			   req_ctx->out_buf + ctx->key_size,
497			   req->dst_len, ctx->key_size);
498	/* Do the actual verification step. */
499	if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
500		   req->dst_len) != 0)
501		err = -EKEYREJECTED;
502done:
503	kzfree(req_ctx->out_buf);
504
505	return err;
506}
507
508static void pkcs1pad_verify_complete_cb(
509		struct crypto_async_request *child_async_req, int err)
510{
511	struct akcipher_request *req = child_async_req->data;
512	struct crypto_async_request async_req;
513
514	if (err == -EINPROGRESS)
515		return;
516
517	async_req.data = req->base.data;
518	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
519	async_req.flags = child_async_req->flags;
520	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
521}
522
523/*
524 * The verify operation is here for completeness similar to the verification
525 * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
526 * as in RFC2437.  RFC2437 section 9.2 doesn't define any operation to
527 * retrieve the DigestInfo from a signature, instead the user is expected
528 * to call the sign operation to generate the expected signature and compare
529 * signatures instead of the message-digests.
530 */
531static int pkcs1pad_verify(struct akcipher_request *req)
532{
533	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
534	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
535	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
536	int err;
537
538	if (WARN_ON(req->dst) ||
539	    WARN_ON(!req->dst_len) ||
540	    !ctx->key_size || req->src_len < ctx->key_size)
541		return -EINVAL;
542
543	req_ctx->out_buf = kmalloc(ctx->key_size + req->dst_len, GFP_KERNEL);
544	if (!req_ctx->out_buf)
545		return -ENOMEM;
546
547	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
548			    ctx->key_size, NULL);
549
550	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
551	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
552			pkcs1pad_verify_complete_cb, req);
553
554	/* Reuse input buffer, output to a new buffer */
555	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
556				   req_ctx->out_sg, req->src_len,
557				   ctx->key_size);
558
559	err = crypto_akcipher_encrypt(&req_ctx->child_req);
560	if (err != -EINPROGRESS && err != -EBUSY)
561		return pkcs1pad_verify_complete(req, err);
562
563	return err;
564}
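/*
 * Caller convention, as implied by the checks above: req->src carries the
 * signature followed by the digest being verified, req->src_len is the
 * signature size, req->dst_len is the digest size and req->dst must be
 * NULL.  pkcs1pad_verify_complete() compares that appended digest against
 * the hash recovered from the signature.
 */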
565
566static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
567{
568	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
569	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
570	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
571	struct crypto_akcipher *child_tfm;
572
573	child_tfm = crypto_spawn_akcipher(&ictx->spawn);
574	if (IS_ERR(child_tfm))
575		return PTR_ERR(child_tfm);
576
577	ctx->child = child_tfm;
578	return 0;
579}
580
581static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
582{
583	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
584
585	crypto_free_akcipher(ctx->child);
586}
587
588static void pkcs1pad_free(struct akcipher_instance *inst)
589{
590	struct pkcs1pad_inst_ctx *ctx = akcipher_instance_ctx(inst);
591	struct crypto_akcipher_spawn *spawn = &ctx->spawn;
592
593	crypto_drop_akcipher(spawn);
594	kfree(inst);
595}
596
597static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
598{
599	const struct rsa_asn1_template *digest_info;
600	struct crypto_attr_type *algt;
601	struct akcipher_instance *inst;
602	struct pkcs1pad_inst_ctx *ctx;
603	struct crypto_akcipher_spawn *spawn;
604	struct akcipher_alg *rsa_alg;
605	const char *rsa_alg_name;
606	const char *hash_name;
607	int err;
608
609	algt = crypto_get_attr_type(tb);
610	if (IS_ERR(algt))
611		return PTR_ERR(algt);
612
613	if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
614		return -EINVAL;
615
616	rsa_alg_name = crypto_attr_alg_name(tb[1]);
617	if (IS_ERR(rsa_alg_name))
618		return PTR_ERR(rsa_alg_name);
619
620	hash_name = crypto_attr_alg_name(tb[2]);
621	if (IS_ERR(hash_name))
622		hash_name = NULL;
623
624	if (hash_name) {
625		digest_info = rsa_lookup_asn1(hash_name);
626		if (!digest_info)
627			return -EINVAL;
628	} else
629		digest_info = NULL;
630
631	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
632	if (!inst)
633		return -ENOMEM;
634
635	ctx = akcipher_instance_ctx(inst);
636	spawn = &ctx->spawn;
637	ctx->digest_info = digest_info;
638
639	crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
640	err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
641			crypto_requires_sync(algt->type, algt->mask));
642	if (err)
643		goto out_free_inst;
644
645	rsa_alg = crypto_spawn_akcipher_alg(spawn);
646
647	err = -ENAMETOOLONG;
648
649	if (!hash_name) {
650		if (snprintf(inst->alg.base.cra_name,
651			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
652			     rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
653			goto out_drop_alg;
654
655		if (snprintf(inst->alg.base.cra_driver_name,
656			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
657			     rsa_alg->base.cra_driver_name) >=
658			     CRYPTO_MAX_ALG_NAME)
659			goto out_drop_alg;
660	} else {
661		if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
662			     "pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
663			     hash_name) >= CRYPTO_MAX_ALG_NAME)
664			goto out_drop_alg;
665
666		if (snprintf(inst->alg.base.cra_driver_name,
667			     CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
668			     rsa_alg->base.cra_driver_name,
669			     hash_name) >= CRYPTO_MAX_ALG_NAME)
670			goto out_drop_alg;
671	}
672
673	inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
674	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
675	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
676
677	inst->alg.init = pkcs1pad_init_tfm;
678	inst->alg.exit = pkcs1pad_exit_tfm;
679
680	inst->alg.encrypt = pkcs1pad_encrypt;
681	inst->alg.decrypt = pkcs1pad_decrypt;
682	inst->alg.sign = pkcs1pad_sign;
683	inst->alg.verify = pkcs1pad_verify;
684	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
685	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
686	inst->alg.max_size = pkcs1pad_get_max_size;
687	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
688
689	inst->free = pkcs1pad_free;
690
691	err = akcipher_register_instance(tmpl, inst);
692	if (err)
693		goto out_drop_alg;
694
695	return 0;
696
697out_drop_alg:
698	crypto_drop_akcipher(spawn);
699out_free_inst:
700	kfree(inst);
701	return err;
702}
703
704struct crypto_template rsa_pkcs1pad_tmpl = {
705	.name = "pkcs1pad",
706	.create = pkcs1pad_create,
707	.module = THIS_MODULE,
708};
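
A minimal usage sketch (not part of the kernel source) of how a caller might drive a "pkcs1pad(rsa,sha256)" instance through the akcipher API, assuming a BER-encoded RSA public key and linear (kmalloc'ed) buffers; the function name and error handling are illustrative only.

#include <crypto/akcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Illustrative helper, not part of rsa-pkcs1pad.c. */
static int example_pkcs1pad_encrypt(const void *pub_key, unsigned int keylen,
				    void *msg, unsigned int msg_len,
				    void *out, unsigned int out_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_akcipher_set_pub_key(tfm, pub_key, keylen);
	if (err)
		goto out_free_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/*
	 * msg_len must not exceed key_size - 11 and out_len must be at
	 * least key_size, per pkcs1pad_encrypt() above.
	 */
	sg_init_one(&src, msg, msg_len);
	sg_init_one(&dst, out, out_len);
	akcipher_request_set_crypt(req, &src, &dst, msg_len, out_len);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	/* Wait synchronously for a possibly asynchronous implementation. */
	err = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

	akcipher_request_free(req);
out_free_tfm:
	crypto_free_akcipher(tfm);
	return err;
}

The first template parameter selects the underlying RSA akcipher implementation and the second the hash whose DigestInfo is used on the sign/verify paths; pkcs1pad_create() assembles the instance name from them.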
crypto/rsa-pkcs1pad.c, v4.17
 
  1/*
  2 * RSA padding templates.
  3 *
  4 * Copyright (c) 2015  Intel Corporation
  5 *
  6 * This program is free software; you can redistribute it and/or modify it
  7 * under the terms of the GNU General Public License as published by the Free
  8 * Software Foundation; either version 2 of the License, or (at your option)
  9 * any later version.
 10 */
 11
 12#include <crypto/algapi.h>
 13#include <crypto/akcipher.h>
 14#include <crypto/internal/akcipher.h>
 15#include <linux/err.h>
 16#include <linux/init.h>
 17#include <linux/kernel.h>
 18#include <linux/module.h>
 19#include <linux/random.h>
 20
 21/*
 22 * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
 23 */
 24static const u8 rsa_digest_info_md5[] = {
 25	0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
 26	0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
 27	0x05, 0x00, 0x04, 0x10
 28};
 29
 30static const u8 rsa_digest_info_sha1[] = {
 31	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
 32	0x2b, 0x0e, 0x03, 0x02, 0x1a,
 33	0x05, 0x00, 0x04, 0x14
 34};
 35
 36static const u8 rsa_digest_info_rmd160[] = {
 37	0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
 38	0x2b, 0x24, 0x03, 0x02, 0x01,
 39	0x05, 0x00, 0x04, 0x14
 40};
 41
 42static const u8 rsa_digest_info_sha224[] = {
 43	0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
 44	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
 45	0x05, 0x00, 0x04, 0x1c
 46};
 47
 48static const u8 rsa_digest_info_sha256[] = {
 49	0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
 50	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
 51	0x05, 0x00, 0x04, 0x20
 52};
 53
 54static const u8 rsa_digest_info_sha384[] = {
 55	0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
 56	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
 57	0x05, 0x00, 0x04, 0x30
 58};
 59
 60static const u8 rsa_digest_info_sha512[] = {
 61	0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
 62	0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
 63	0x05, 0x00, 0x04, 0x40
 64};
 65
 66static const struct rsa_asn1_template {
 67	const char	*name;
 68	const u8	*data;
 69	size_t		size;
 70} rsa_asn1_templates[] = {
 71#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
 72	_(md5),
 73	_(sha1),
 74	_(rmd160),
 75	_(sha256),
 76	_(sha384),
 77	_(sha512),
 78	_(sha224),
 79	{ NULL }
 80#undef _
 81};
 82
 83static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
 84{
 85	const struct rsa_asn1_template *p;
 86
 87	for (p = rsa_asn1_templates; p->name; p++)
 88		if (strcmp(name, p->name) == 0)
 89			return p;
 90	return NULL;
 91}
 92
 93struct pkcs1pad_ctx {
 94	struct crypto_akcipher *child;
 95	unsigned int key_size;
 96};
 97
 98struct pkcs1pad_inst_ctx {
 99	struct crypto_akcipher_spawn spawn;
100	const struct rsa_asn1_template *digest_info;
101};
102
103struct pkcs1pad_request {
104	struct scatterlist in_sg[2], out_sg[1];
105	uint8_t *in_buf, *out_buf;
106	struct akcipher_request child_req;
107};
108
109static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
110		unsigned int keylen)
111{
112	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
113	int err;
114
115	ctx->key_size = 0;
116
117	err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
118	if (err)
119		return err;
120
121	/* Find out new modulus size from rsa implementation */
122	err = crypto_akcipher_maxsize(ctx->child);
123	if (err > PAGE_SIZE)
124		return -ENOTSUPP;
125
126	ctx->key_size = err;
127	return 0;
128}
129
130static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
131		unsigned int keylen)
132{
133	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
134	int err;
135
136	ctx->key_size = 0;
137
138	err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
139	if (err)
140		return err;
141
142	/* Find out new modulus size from rsa implementation */
143	err = crypto_akcipher_maxsize(ctx->child);
144	if (err > PAGE_SIZE)
145		return -ENOTSUPP;
146
147	ctx->key_size = err;
148	return 0;
149}
150
151static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
152{
153	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
154
155	/*
156	 * The maximum destination buffer size for the encrypt/sign operations
157	 * will be the same as for RSA, even though it's smaller for
158	 * decrypt/verify.
159	 */
160
161	return ctx->key_size;
162}
163
164static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
165		struct scatterlist *next)
166{
167	int nsegs = next ? 2 : 1;
168
169	sg_init_table(sg, nsegs);
170	sg_set_buf(sg, buf, len);
171
172	if (next)
173		sg_chain(sg, nsegs, next);
174}
175
176static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
177{
178	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
179	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
180	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
181	unsigned int pad_len;
182	unsigned int len;
183	u8 *out_buf;
184
185	if (err)
186		goto out;
187
188	len = req_ctx->child_req.dst_len;
189	pad_len = ctx->key_size - len;
190
191	/* Four billion to one */
192	if (likely(!pad_len))
193		goto out;
194
195	out_buf = kzalloc(ctx->key_size, GFP_KERNEL);
196	err = -ENOMEM;
197	if (!out_buf)
198		goto out;
199
200	sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
201			  out_buf + pad_len, len);
202	sg_copy_from_buffer(req->dst,
203			    sg_nents_for_len(req->dst, ctx->key_size),
204			    out_buf, ctx->key_size);
205	kzfree(out_buf);
206
207out:
208	req->dst_len = ctx->key_size;
209
210	kfree(req_ctx->in_buf);
211
212	return err;
213}
214
215static void pkcs1pad_encrypt_sign_complete_cb(
216		struct crypto_async_request *child_async_req, int err)
217{
218	struct akcipher_request *req = child_async_req->data;
219	struct crypto_async_request async_req;
220
221	if (err == -EINPROGRESS)
222		return;
223
224	async_req.data = req->base.data;
225	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
226	async_req.flags = child_async_req->flags;
227	req->base.complete(&async_req,
228			pkcs1pad_encrypt_sign_complete(req, err));
229}
230
231static int pkcs1pad_encrypt(struct akcipher_request *req)
232{
233	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
234	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
235	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
236	int err;
237	unsigned int i, ps_end;
238
239	if (!ctx->key_size)
240		return -EINVAL;
241
242	if (req->src_len > ctx->key_size - 11)
243		return -EOVERFLOW;
244
245	if (req->dst_len < ctx->key_size) {
246		req->dst_len = ctx->key_size;
247		return -EOVERFLOW;
248	}
249
250	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
251				  GFP_KERNEL);
252	if (!req_ctx->in_buf)
253		return -ENOMEM;
254
255	ps_end = ctx->key_size - req->src_len - 2;
256	req_ctx->in_buf[0] = 0x02;
257	for (i = 1; i < ps_end; i++)
258		req_ctx->in_buf[i] = 1 + prandom_u32_max(255);
259	req_ctx->in_buf[ps_end] = 0x00;
260
261	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
262			ctx->key_size - 1 - req->src_len, req->src);
263
264	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
265	if (!req_ctx->out_buf) {
266		kfree(req_ctx->in_buf);
267		return -ENOMEM;
268	}
269
270	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
271			ctx->key_size, NULL);
272
273	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
274	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
275			pkcs1pad_encrypt_sign_complete_cb, req);
276
277	/* Reuse output buffer */
278	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
279				   req->dst, ctx->key_size - 1, req->dst_len);
280
281	err = crypto_akcipher_encrypt(&req_ctx->child_req);
282	if (err != -EINPROGRESS && err != -EBUSY)
283		return pkcs1pad_encrypt_sign_complete(req, err);
284
285	return err;
286}
287
288static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
289{
290	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
291	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
292	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
293	unsigned int dst_len;
294	unsigned int pos;
295	u8 *out_buf;
296
297	if (err)
298		goto done;
299
300	err = -EINVAL;
301	dst_len = req_ctx->child_req.dst_len;
302	if (dst_len < ctx->key_size - 1)
303		goto done;
304
305	out_buf = req_ctx->out_buf;
306	if (dst_len == ctx->key_size) {
307		if (out_buf[0] != 0x00)
308			/* Decrypted value had no leading 0 byte */
309			goto done;
310
311		dst_len--;
312		out_buf++;
313	}
314
315	if (out_buf[0] != 0x02)
316		goto done;
317
318	for (pos = 1; pos < dst_len; pos++)
319		if (out_buf[pos] == 0x00)
320			break;
321	if (pos < 9 || pos == dst_len)
322		goto done;
323	pos++;
324
325	err = 0;
326
327	if (req->dst_len < dst_len - pos)
328		err = -EOVERFLOW;
329	req->dst_len = dst_len - pos;
330
331	if (!err)
332		sg_copy_from_buffer(req->dst,
333				sg_nents_for_len(req->dst, req->dst_len),
334				out_buf + pos, req->dst_len);
335
336done:
337	kzfree(req_ctx->out_buf);
338
339	return err;
340}
341
342static void pkcs1pad_decrypt_complete_cb(
343		struct crypto_async_request *child_async_req, int err)
344{
345	struct akcipher_request *req = child_async_req->data;
346	struct crypto_async_request async_req;
347
348	if (err == -EINPROGRESS)
349		return;
350
351	async_req.data = req->base.data;
352	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
353	async_req.flags = child_async_req->flags;
354	req->base.complete(&async_req, pkcs1pad_decrypt_complete(req, err));
355}
356
357static int pkcs1pad_decrypt(struct akcipher_request *req)
358{
359	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
360	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
361	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
362	int err;
363
364	if (!ctx->key_size || req->src_len != ctx->key_size)
365		return -EINVAL;
366
367	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
368	if (!req_ctx->out_buf)
369		return -ENOMEM;
370
371	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
372			    ctx->key_size, NULL);
373
374	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
375	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
376			pkcs1pad_decrypt_complete_cb, req);
377
378	/* Reuse input buffer, output to a new buffer */
379	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
380				   req_ctx->out_sg, req->src_len,
381				   ctx->key_size);
382
383	err = crypto_akcipher_decrypt(&req_ctx->child_req);
384	if (err != -EINPROGRESS && err != -EBUSY)
385		return pkcs1pad_decrypt_complete(req, err);
386
387	return err;
388}
389
390static int pkcs1pad_sign(struct akcipher_request *req)
391{
392	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
393	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
394	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
395	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
396	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
397	const struct rsa_asn1_template *digest_info = ictx->digest_info;
398	int err;
399	unsigned int ps_end, digest_size = 0;
400
401	if (!ctx->key_size)
402		return -EINVAL;
403
404	digest_size = digest_info->size;
405
406	if (req->src_len + digest_size > ctx->key_size - 11)
407		return -EOVERFLOW;
408
409	if (req->dst_len < ctx->key_size) {
410		req->dst_len = ctx->key_size;
411		return -EOVERFLOW;
412	}
413
414	req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
415				  GFP_KERNEL);
416	if (!req_ctx->in_buf)
417		return -ENOMEM;
418
419	ps_end = ctx->key_size - digest_size - req->src_len - 2;
420	req_ctx->in_buf[0] = 0x01;
421	memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
422	req_ctx->in_buf[ps_end] = 0x00;
423
424	memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
425	       digest_info->size);
426
427	pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
428			ctx->key_size - 1 - req->src_len, req->src);
429
430	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
431	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
432			pkcs1pad_encrypt_sign_complete_cb, req);
433
434	/* Reuse output buffer */
435	akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
436				   req->dst, ctx->key_size - 1, req->dst_len);
437
438	err = crypto_akcipher_sign(&req_ctx->child_req);
439	if (err != -EINPROGRESS && err != -EBUSY)
440		return pkcs1pad_encrypt_sign_complete(req, err);
441
442	return err;
443}
444
445static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
446{
447	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
448	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
449	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
450	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
451	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
452	const struct rsa_asn1_template *digest_info = ictx->digest_info;
453	unsigned int dst_len;
454	unsigned int pos;
455	u8 *out_buf;
456
457	if (err)
458		goto done;
459
460	err = -EINVAL;
461	dst_len = req_ctx->child_req.dst_len;
462	if (dst_len < ctx->key_size - 1)
463		goto done;
464
465	out_buf = req_ctx->out_buf;
466	if (dst_len == ctx->key_size) {
467		if (out_buf[0] != 0x00)
468			/* Decrypted value had no leading 0 byte */
469			goto done;
470
471		dst_len--;
472		out_buf++;
473	}
474
475	err = -EBADMSG;
476	if (out_buf[0] != 0x01)
477		goto done;
478
479	for (pos = 1; pos < dst_len; pos++)
480		if (out_buf[pos] != 0xff)
481			break;
482
483	if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
484		goto done;
485	pos++;
486
487	if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size))
488		goto done;
489
490	pos += digest_info->size;
491
492	err = 0;
493
494	if (req->dst_len < dst_len - pos)
495		err = -EOVERFLOW;
496	req->dst_len = dst_len - pos;
497
498	if (!err)
499		sg_copy_from_buffer(req->dst,
500				sg_nents_for_len(req->dst, req->dst_len),
501				out_buf + pos, req->dst_len);
502done:
503	kzfree(req_ctx->out_buf);
504
505	return err;
506}
507
508static void pkcs1pad_verify_complete_cb(
509		struct crypto_async_request *child_async_req, int err)
510{
511	struct akcipher_request *req = child_async_req->data;
512	struct crypto_async_request async_req;
513
514	if (err == -EINPROGRESS)
515		return;
516
517	async_req.data = req->base.data;
518	async_req.tfm = crypto_akcipher_tfm(crypto_akcipher_reqtfm(req));
519	async_req.flags = child_async_req->flags;
520	req->base.complete(&async_req, pkcs1pad_verify_complete(req, err));
521}
522
523/*
524 * The verify operation is here for completeness similar to the verification
525 * defined in RFC2313 section 10.2 except that block type 0 is not accepted,
526 * as in RFC2437.  RFC2437 section 9.2 doesn't define any operation to
527 * retrieve the DigestInfo from a signature, instead the user is expected
528 * to call the sign operation to generate the expected signature and compare
529 * signatures instead of the message-digests.
530 */
531static int pkcs1pad_verify(struct akcipher_request *req)
532{
533	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
534	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
535	struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
536	int err;
537
538	if (!ctx->key_size || req->src_len < ctx->key_size)
539		return -EINVAL;
540
541	req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
542	if (!req_ctx->out_buf)
543		return -ENOMEM;
544
545	pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
546			    ctx->key_size, NULL);
547
548	akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
549	akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
550			pkcs1pad_verify_complete_cb, req);
551
552	/* Reuse input buffer, output to a new buffer */
553	akcipher_request_set_crypt(&req_ctx->child_req, req->src,
554				   req_ctx->out_sg, req->src_len,
555				   ctx->key_size);
556
557	err = crypto_akcipher_verify(&req_ctx->child_req);
558	if (err != -EINPROGRESS && err != -EBUSY)
559		return pkcs1pad_verify_complete(req, err);
560
561	return err;
562}
563
564static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
565{
566	struct akcipher_instance *inst = akcipher_alg_instance(tfm);
567	struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
568	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
569	struct crypto_akcipher *child_tfm;
570
571	child_tfm = crypto_spawn_akcipher(&ictx->spawn);
572	if (IS_ERR(child_tfm))
573		return PTR_ERR(child_tfm);
574
575	ctx->child = child_tfm;
576	return 0;
577}
578
579static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
580{
581	struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
582
583	crypto_free_akcipher(ctx->child);
584}
585
586static void pkcs1pad_free(struct akcipher_instance *inst)
587{
588	struct pkcs1pad_inst_ctx *ctx = akcipher_instance_ctx(inst);
589	struct crypto_akcipher_spawn *spawn = &ctx->spawn;
590
591	crypto_drop_akcipher(spawn);
592	kfree(inst);
593}
594
595static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
596{
597	const struct rsa_asn1_template *digest_info;
598	struct crypto_attr_type *algt;
599	struct akcipher_instance *inst;
600	struct pkcs1pad_inst_ctx *ctx;
601	struct crypto_akcipher_spawn *spawn;
602	struct akcipher_alg *rsa_alg;
603	const char *rsa_alg_name;
604	const char *hash_name;
605	int err;
606
607	algt = crypto_get_attr_type(tb);
608	if (IS_ERR(algt))
609		return PTR_ERR(algt);
610
611	if ((algt->type ^ CRYPTO_ALG_TYPE_AKCIPHER) & algt->mask)
612		return -EINVAL;
613
614	rsa_alg_name = crypto_attr_alg_name(tb[1]);
615	if (IS_ERR(rsa_alg_name))
616		return PTR_ERR(rsa_alg_name);
617
618	hash_name = crypto_attr_alg_name(tb[2]);
619	if (IS_ERR(hash_name))
620		return PTR_ERR(hash_name);
621
622	digest_info = rsa_lookup_asn1(hash_name);
623	if (!digest_info)
624		return -EINVAL;
625
626	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
627	if (!inst)
628		return -ENOMEM;
629
630	ctx = akcipher_instance_ctx(inst);
631	spawn = &ctx->spawn;
632	ctx->digest_info = digest_info;
633
634	crypto_set_spawn(&spawn->base, akcipher_crypto_instance(inst));
635	err = crypto_grab_akcipher(spawn, rsa_alg_name, 0,
636			crypto_requires_sync(algt->type, algt->mask));
637	if (err)
638		goto out_free_inst;
639
640	rsa_alg = crypto_spawn_akcipher_alg(spawn);
641
642	err = -ENAMETOOLONG;
643
644	if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
645		     "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, hash_name) >=
646	    CRYPTO_MAX_ALG_NAME ||
647	    snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
648		     "pkcs1pad(%s,%s)",
649		     rsa_alg->base.cra_driver_name, hash_name) >=
650	    CRYPTO_MAX_ALG_NAME)
651		goto out_drop_alg;
652
653	inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
654	inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
655	inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
656
657	inst->alg.init = pkcs1pad_init_tfm;
658	inst->alg.exit = pkcs1pad_exit_tfm;
659
660	inst->alg.encrypt = pkcs1pad_encrypt;
661	inst->alg.decrypt = pkcs1pad_decrypt;
662	inst->alg.sign = pkcs1pad_sign;
663	inst->alg.verify = pkcs1pad_verify;
664	inst->alg.set_pub_key = pkcs1pad_set_pub_key;
665	inst->alg.set_priv_key = pkcs1pad_set_priv_key;
666	inst->alg.max_size = pkcs1pad_get_max_size;
667	inst->alg.reqsize = sizeof(struct pkcs1pad_request) + rsa_alg->reqsize;
668
669	inst->free = pkcs1pad_free;
670
671	err = akcipher_register_instance(tmpl, inst);
672	if (err)
673		goto out_drop_alg;
674
675	return 0;
676
677out_drop_alg:
678	crypto_drop_akcipher(spawn);
679out_free_inst:
680	kfree(inst);
681	return err;
682}
683
684struct crypto_template rsa_pkcs1pad_tmpl = {
685	.name = "pkcs1pad",
686	.create = pkcs1pad_create,
687	.module = THIS_MODULE,
688};