Linux Audio

Check our new training course

Loading...
v3.15
 
  1/*
  2 * Crypto user configuration API.
  3 *
  4 * Copyright (C) 2011 secunet Security Networks AG
  5 * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
  6 *
  7 * This program is free software; you can redistribute it and/or modify it
  8 * under the terms and conditions of the GNU General Public License,
  9 * version 2, as published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope it will be useful, but WITHOUT
 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 14 * more details.
 15 *
 16 * You should have received a copy of the GNU General Public License along with
 17 * this program; if not, write to the Free Software Foundation, Inc.,
 18 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 19 */
 20
 21#include <linux/module.h>
 22#include <linux/crypto.h>
 23#include <linux/cryptouser.h>
 24#include <linux/sched.h>
 25#include <net/netlink.h>
 26#include <linux/security.h>
 
 27#include <net/net_namespace.h>
 28#include <crypto/internal/aead.h>
 29#include <crypto/internal/skcipher.h>
 
 
 
 30
 31#include "internal.h"
 32
 33#define null_terminated(x)	(strnlen(x, sizeof(x)) < sizeof(x))
 34
 35static DEFINE_MUTEX(crypto_cfg_mutex);
 36
 37/* The crypto netlink socket */
 38static struct sock *crypto_nlsk;
 39
 40struct crypto_dump_info {
 41	struct sk_buff *in_skb;
 42	struct sk_buff *out_skb;
 43	u32 nlmsg_seq;
 44	u16 nlmsg_flags;
 45};
 46
/*
 * Find a registered algorithm matching the user request @p.
 *
 * Matching runs under the crypto_alg_sem read lock: the type/mask bits
 * requested by the user must agree with the algorithm's flags, then
 * either the driver name matches exactly or, when @exact is zero, the
 * generic algorithm name matches.  Returns the matching entry or NULL.
 *
 * NOTE(review): the returned pointer is used by callers after
 * crypto_alg_sem has been dropped without taking a module/refcount
 * reference here, so the algorithm could in principle disappear under
 * them; later kernels pin it with crypto_mod_get() — confirm whether
 * this version needs the same treatment.
 */
static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
	struct crypto_alg *q, *alg = NULL;

	down_read(&crypto_alg_sem);

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int match = 0;

		/* Skip entries that differ in any flag bit the user cares about. */
		if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
			continue;

		if (strlen(p->cru_driver_name))
			match = !strcmp(q->cra_driver_name,
					p->cru_driver_name);
		else if (!exact)
			match = !strcmp(q->cra_name, p->cru_name);

		if (match) {
			alg = q;
			break;
		}
	}

	up_read(&crypto_alg_sem);

	return alg;
}
 75
 76static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 77{
 78	struct crypto_report_cipher rcipher;
 79
 80	strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
 
 
 81
 82	rcipher.blocksize = alg->cra_blocksize;
 83	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
 84	rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
 85
 86	if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
 87		    sizeof(struct crypto_report_cipher), &rcipher))
 88		goto nla_put_failure;
 89	return 0;
 90
 91nla_put_failure:
 92	return -EMSGSIZE;
 93}
 94
 95static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
 96{
 97	struct crypto_report_comp rcomp;
 98
 99	strncpy(rcomp.type, "compression", sizeof(rcomp.type));
100	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
101		    sizeof(struct crypto_report_comp), &rcomp))
102		goto nla_put_failure;
103	return 0;
104
105nla_put_failure:
106	return -EMSGSIZE;
 
107}
108
109static int crypto_report_one(struct crypto_alg *alg,
110			     struct crypto_user_alg *ualg, struct sk_buff *skb)
111{
112	strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
113	strncpy(ualg->cru_driver_name, alg->cra_driver_name,
 
 
114		sizeof(ualg->cru_driver_name));
115	strncpy(ualg->cru_module_name, module_name(alg->cra_module),
116		sizeof(ualg->cru_module_name));
117
118	ualg->cru_type = 0;
119	ualg->cru_mask = 0;
120	ualg->cru_flags = alg->cra_flags;
121	ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
122
123	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
124		goto nla_put_failure;
125	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
126		struct crypto_report_larval rl;
127
128		strncpy(rl.type, "larval", sizeof(rl.type));
129		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
130			    sizeof(struct crypto_report_larval), &rl))
131			goto nla_put_failure;
132		goto out;
133	}
134
135	if (alg->cra_type && alg->cra_type->report) {
136		if (alg->cra_type->report(skb, alg))
137			goto nla_put_failure;
138
139		goto out;
140	}
141
142	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
143	case CRYPTO_ALG_TYPE_CIPHER:
144		if (crypto_report_cipher(skb, alg))
145			goto nla_put_failure;
146
147		break;
148	case CRYPTO_ALG_TYPE_COMPRESS:
149		if (crypto_report_comp(skb, alg))
150			goto nla_put_failure;
151
152		break;
153	}
154
155out:
156	return 0;
157
158nla_put_failure:
159	return -EMSGSIZE;
160}
161
162static int crypto_report_alg(struct crypto_alg *alg,
163			     struct crypto_dump_info *info)
164{
165	struct sk_buff *in_skb = info->in_skb;
166	struct sk_buff *skb = info->out_skb;
167	struct nlmsghdr *nlh;
168	struct crypto_user_alg *ualg;
169	int err = 0;
170
171	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
172			CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
173	if (!nlh) {
174		err = -EMSGSIZE;
175		goto out;
176	}
177
178	ualg = nlmsg_data(nlh);
179
180	err = crypto_report_one(alg, ualg, skb);
181	if (err) {
182		nlmsg_cancel(skb, nlh);
183		goto out;
184	}
185
186	nlmsg_end(skb, nlh);
187
188out:
189	return err;
190}
191
192static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
193			 struct nlattr **attrs)
194{
 
195	struct crypto_user_alg *p = nlmsg_data(in_nlh);
196	struct crypto_alg *alg;
197	struct sk_buff *skb;
198	struct crypto_dump_info info;
199	int err;
200
201	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
202		return -EINVAL;
203
204	if (!p->cru_driver_name[0])
205		return -EINVAL;
206
207	alg = crypto_alg_match(p, 1);
208	if (!alg)
209		return -ENOENT;
210
211	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
 
212	if (!skb)
213		return -ENOMEM;
214
215	info.in_skb = in_skb;
216	info.out_skb = skb;
217	info.nlmsg_seq = in_nlh->nlmsg_seq;
218	info.nlmsg_flags = 0;
219
220	err = crypto_report_alg(alg, &info);
221	if (err)
 
 
 
 
 
222		return err;
 
223
224	return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
225}
226
227static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
228{
229	struct crypto_alg *alg;
 
230	struct crypto_dump_info info;
231	int err;
232
233	if (cb->args[0])
234		goto out;
235
236	cb->args[0] = 1;
237
238	info.in_skb = cb->skb;
239	info.out_skb = skb;
240	info.nlmsg_seq = cb->nlh->nlmsg_seq;
241	info.nlmsg_flags = NLM_F_MULTI;
242
 
243	list_for_each_entry(alg, &crypto_alg_list, cra_list) {
244		err = crypto_report_alg(alg, &info);
245		if (err)
246			goto out_err;
 
 
 
 
 
247	}
248
 
249out:
250	return skb->len;
251out_err:
252	return err;
253}
254
/* Dump teardown hook: no per-dump state to release. */
static int crypto_dump_report_done(struct netlink_callback *cb)
{
	return 0;
}
259
/*
 * CRYPTO_MSG_UPDATEALG handler: update run-time properties of an
 * algorithm — currently only its priority (CRYPTOCFGA_PRIORITY_VAL).
 * The algorithm must be named exactly by driver name.
 */
static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
	LIST_HEAD(list);

	/* The fixed-size name fields must be NUL-terminated strings. */
	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	/* A priority update only makes sense with an exact driver name. */
	if (priority && !strlen(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	down_write(&crypto_alg_sem);

	/* Tear down instances built on this algorithm.  NOTE(review):
	 * presumably so they get rebuilt against the re-prioritized
	 * algorithm — confirm against crypto_remove_spawns semantics. */
	crypto_remove_spawns(alg, &list, NULL);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	/* Free the removed instances outside of crypto_alg_sem. */
	crypto_remove_final(&list);

	return 0;
}
291
/*
 * CRYPTO_MSG_DELALG handler: unregister a template-built instance,
 * selected by exact driver name.
 */
static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);

	/* The fixed-size name fields must be NUL-terminated strings. */
	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	/* We can not unregister core algorithms such as aes-generic.
	 * We would loose the reference in the crypto_alg_list to this algorithm
	 * if we try to unregister. Unregistering such an algorithm without
	 * removing the module is not possible, so we restrict to crypto
	 * instances that are build from templates. */
	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
		return -EINVAL;

	/* Only the list's own reference may remain.  NOTE(review): this
	 * read is not atomic with the unregister below, so a concurrent
	 * user could still grab the alg in between — later kernels hold
	 * a reference from crypto_alg_match while checking; confirm. */
	if (atomic_read(&alg->cra_refcnt) != 1)
		return -EBUSY;

	return crypto_unregister_instance(alg);
}
318
319static struct crypto_alg *crypto_user_skcipher_alg(const char *name, u32 type,
320						   u32 mask)
321{
322	int err;
323	struct crypto_alg *alg;
324
325	type = crypto_skcipher_type(type);
326	mask = crypto_skcipher_mask(mask);
327
328	for (;;) {
329		alg = crypto_lookup_skcipher(name,  type, mask);
330		if (!IS_ERR(alg))
331			return alg;
332
333		err = PTR_ERR(alg);
334		if (err != -EAGAIN)
335			break;
336		if (signal_pending(current)) {
337			err = -EINTR;
338			break;
339		}
340	}
341
342	return ERR_PTR(err);
343}
344
345static struct crypto_alg *crypto_user_aead_alg(const char *name, u32 type,
346					       u32 mask)
347{
348	int err;
349	struct crypto_alg *alg;
350
351	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
352	type |= CRYPTO_ALG_TYPE_AEAD;
353	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
354	mask |= CRYPTO_ALG_TYPE_MASK;
355
356	for (;;) {
357		alg = crypto_lookup_aead(name,  type, mask);
358		if (!IS_ERR(alg))
359			return alg;
360
361		err = PTR_ERR(alg);
362		if (err != -EAGAIN)
363			break;
364		if (signal_pending(current)) {
365			err = -EINTR;
366			break;
367		}
368	}
369
370	return ERR_PTR(err);
371}
372
/*
 * CRYPTO_MSG_NEWALG handler: look up (and thereby instantiate, via the
 * template machinery behind the lookup helpers) an algorithm that is
 * not yet registered, optionally assigning it a priority.
 */
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	int exact = 0;
	const char *name;
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];

	/* The fixed-size name fields must be NUL-terminated strings. */
	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (strlen(p->cru_driver_name))
		exact = 1;

	/* A priority may only be attached to an exact driver name. */
	if (priority && !exact)
		return -EINVAL;

	/* Refuse to re-add something that already exists. */
	alg = crypto_alg_match(p, exact);
	if (alg)
		return -EEXIST;

	if (strlen(p->cru_driver_name))
		name = p->cru_driver_name;
	else
		name = p->cru_name;

	/* AEAD and the (giv/a)blkcipher family need their type-specific
	 * lookup helpers; everything else uses the generic module lookup. */
	switch (p->cru_type & p->cru_mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AEAD:
		alg = crypto_user_aead_alg(name, p->cru_type, p->cru_mask);
		break;
	case CRYPTO_ALG_TYPE_GIVCIPHER:
	case CRYPTO_ALG_TYPE_BLKCIPHER:
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = crypto_user_skcipher_alg(name, p->cru_type, p->cru_mask);
		break;
	default:
		alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
	}

	if (IS_ERR(alg))
		return PTR_ERR(alg);

	down_write(&crypto_alg_sem);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	/* Drop the lookup reference; the registration keeps its own. */
	crypto_mod_put(alg);

	return 0;
}
427
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
428#define MSGSIZE(type) sizeof(struct type)
429
430static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
431	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
432	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
433	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
434	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
 
 
435};
436
437static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
438	[CRYPTOCFGA_PRIORITY_VAL]   = { .type = NLA_U32},
439};
440
441#undef MSGSIZE
442
443static const struct crypto_link {
444	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
445	int (*dump)(struct sk_buff *, struct netlink_callback *);
446	int (*done)(struct netlink_callback *);
447} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
448	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
449	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
450	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
451	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = { .doit = crypto_report,
452						       .dump = crypto_dump_report,
453						       .done = crypto_dump_report_done},
 
 
454};
455
/*
 * Demultiplex one crypto netlink message: validate the type, enforce
 * CAP_NET_ADMIN, start a dump for GETALG+NLM_F_DUMP, otherwise parse
 * the attributes and dispatch through crypto_dispatch[].
 */
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct nlattr *attrs[CRYPTOCFGA_MAX+1];
	const struct crypto_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > CRYPTO_MSG_MAX)
		return -EINVAL;

	/* NOTE(review): no lower-bound check; this relies on the netlink
	 * core filtering out message types below NLMSG_MIN_TYPE
	 * (== CRYPTO_MSG_BASE) before this callback runs — confirm. */
	type -= CRYPTO_MSG_BASE;
	link = &crypto_dispatch[type];

	/* Every crypto configuration operation is privileged. */
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* GETALG with NLM_F_DUMP streams the whole algorithm list. */
	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP))) {
		struct crypto_alg *alg;
		u16 dump_alloc = 0;

		if (link->dump == NULL)
			return -EINVAL;

		/* Size the dump buffer as one max-size report per
		 * algorithm.  NOTE(review): the list is walked without
		 * crypto_alg_sem and the u16 accumulator can wrap with
		 * many algorithms — later kernels take the lock and
		 * clamp; confirm whether a fix should be backported. */
		list_for_each_entry(alg, &crypto_alg_list, cra_list)
			dump_alloc += CRYPTO_REPORT_MAXSIZE;

		{
			struct netlink_dump_control c = {
				.dump = link->dump,
				.done = link->done,
				.min_dump_alloc = dump_alloc,
			};
			return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
		}
	}

	err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
			  crypto_policy);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}
503
/*
 * Netlink input callback: serialize all configuration requests with
 * crypto_cfg_mutex and let netlink_rcv_skb() split the skb into
 * individual messages for crypto_user_rcv_msg().
 */
static void crypto_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&crypto_cfg_mutex);
	netlink_rcv_skb(skb, &crypto_user_rcv_msg);
	mutex_unlock(&crypto_cfg_mutex);
}
510
511static int __init crypto_user_init(void)
512{
513	struct netlink_kernel_cfg cfg = {
514		.input	= crypto_netlink_rcv,
515	};
516
517	crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO, &cfg);
518	if (!crypto_nlsk)
519		return -ENOMEM;
520
521	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
522}
523
/* Module unload: tear down the crypto netlink socket. */
static void __exit crypto_user_exit(void)
{
	netlink_kernel_release(crypto_nlsk);
}
528
529module_init(crypto_user_init);
530module_exit(crypto_user_exit);
531MODULE_LICENSE("GPL");
532MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
533MODULE_DESCRIPTION("Crypto userspace configuration API");
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Crypto user configuration API.
  4 *
  5 * Copyright (C) 2011 secunet Security Networks AG
  6 * Copyright (C) 2011 Steffen Klassert <steffen.klassert@secunet.com>
 
 
 
 
 
 
 
 
 
 
 
 
 
  7 */
  8
  9#include <linux/module.h>
 10#include <linux/crypto.h>
 11#include <linux/cryptouser.h>
 12#include <linux/sched.h>
 
 13#include <linux/security.h>
 14#include <net/netlink.h>
 15#include <net/net_namespace.h>
 16#include <net/sock.h>
 17#include <crypto/internal/skcipher.h>
 18#include <crypto/internal/rng.h>
 19#include <crypto/akcipher.h>
 20#include <crypto/kpp.h>
 21
 22#include "internal.h"
 23
 24#define null_terminated(x)	(strnlen(x, sizeof(x)) < sizeof(x))
 25
 26static DEFINE_MUTEX(crypto_cfg_mutex);
 27
 
 
 
 28struct crypto_dump_info {
 29	struct sk_buff *in_skb;
 30	struct sk_buff *out_skb;
 31	u32 nlmsg_seq;
 32	u16 nlmsg_flags;
 33};
 34
/*
 * Look up a live, registered algorithm matching the user request @p.
 *
 * Larval (still-registering) placeholders are skipped.  On a match a
 * module reference is taken via crypto_mod_get() before crypto_alg_sem
 * is released, so the caller owns a reference and must drop it with
 * crypto_mod_put().  Returns NULL when nothing matches (or the
 * reference could not be taken).
 */
static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
	struct crypto_alg *q, *alg = NULL;

	down_read(&crypto_alg_sem);

	list_for_each_entry(q, &crypto_alg_list, cra_list) {
		int match = 0;

		/* Larvals are placeholders, not usable algorithms. */
		if (crypto_is_larval(q))
			continue;

		/* Skip entries that differ in any flag bit the user cares about. */
		if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
			continue;

		if (strlen(p->cru_driver_name))
			match = !strcmp(q->cra_driver_name,
					p->cru_driver_name);
		else if (!exact)
			match = !strcmp(q->cra_name, p->cru_name);

		if (!match)
			continue;

		/* Pin the module so the alg cannot vanish after unlock. */
		if (unlikely(!crypto_mod_get(q)))
			continue;

		alg = q;
		break;
	}

	up_read(&crypto_alg_sem);

	return alg;
}
 70
/*
 * Emit a CRYPTOCFGA_REPORT_CIPHER attribute for a classic single-block
 * cipher.  rcipher is zeroed first because it is copied verbatim to
 * userspace (padding must not leak stack memory).
 */
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_cipher rcipher;

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.blocksize = alg->cra_blocksize;
	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
	rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;

	return nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
		       sizeof(rcipher), &rcipher);
}
 86
/*
 * Emit a CRYPTOCFGA_REPORT_COMPRESS attribute for a compression
 * algorithm.  rcomp is zeroed first because it is copied verbatim to
 * userspace (padding must not leak stack memory).
 */
static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_comp rcomp;

	memset(&rcomp, 0, sizeof(rcomp));

	strscpy(rcomp.type, "compression", sizeof(rcomp.type));

	return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
}
 97
/*
 * Fill the crypto_user_alg payload @ualg for @alg and append the
 * type-specific report attribute to @skb.  @ualg is zeroed up front so
 * padding and unused name-field tails cannot leak kernel memory to
 * userspace.  Returns 0 on success or -EMSGSIZE on overflow.
 */
static int crypto_report_one(struct crypto_alg *alg,
			     struct crypto_user_alg *ualg, struct sk_buff *skb)
{
	memset(ualg, 0, sizeof(*ualg));

	strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
	strscpy(ualg->cru_driver_name, alg->cra_driver_name,
		sizeof(ualg->cru_driver_name));
	strscpy(ualg->cru_module_name, module_name(alg->cra_module),
		sizeof(ualg->cru_module_name));

	ualg->cru_type = 0;
	ualg->cru_mask = 0;
	ualg->cru_flags = alg->cra_flags;
	ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);

	if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
		goto nla_put_failure;
	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
		struct crypto_report_larval rl;

		/* rl is copied to userspace as well — zero it first. */
		memset(&rl, 0, sizeof(rl));
		strscpy(rl.type, "larval", sizeof(rl.type));
		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(rl), &rl))
			goto nla_put_failure;
		goto out;
	}

	/* Types with their own report callback format the attribute themselves. */
	if (alg->cra_type && alg->cra_type->report) {
		if (alg->cra_type->report(skb, alg))
			goto nla_put_failure;

		goto out;
	}

	/* Legacy types without a cra_type report hook. */
	switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
	case CRYPTO_ALG_TYPE_CIPHER:
		if (crypto_report_cipher(skb, alg))
			goto nla_put_failure;

		break;
	case CRYPTO_ALG_TYPE_COMPRESS:
		if (crypto_report_comp(skb, alg))
			goto nla_put_failure;

		break;
	}

out:
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
152
153static int crypto_report_alg(struct crypto_alg *alg,
154			     struct crypto_dump_info *info)
155{
156	struct sk_buff *in_skb = info->in_skb;
157	struct sk_buff *skb = info->out_skb;
158	struct nlmsghdr *nlh;
159	struct crypto_user_alg *ualg;
160	int err = 0;
161
162	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
163			CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
164	if (!nlh) {
165		err = -EMSGSIZE;
166		goto out;
167	}
168
169	ualg = nlmsg_data(nlh);
170
171	err = crypto_report_one(alg, ualg, skb);
172	if (err) {
173		nlmsg_cancel(skb, nlh);
174		goto out;
175	}
176
177	nlmsg_end(skb, nlh);
178
179out:
180	return err;
181}
182
/*
 * CRYPTO_MSG_GETALG (non-dump) handler: build a fresh reply skb, fill
 * it via crypto_report_alg() and unicast it back to the requester.
 * The reference taken by crypto_alg_match() is dropped on every path;
 * the reply skb is freed on error (kfree_skb(NULL) on the -ENOMEM path
 * is a no-op).
 */
static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
			 struct nlattr **attrs)
{
	struct net *net = sock_net(in_skb->sk);
	struct crypto_user_alg *p = nlmsg_data(in_nlh);
	struct crypto_alg *alg;
	struct sk_buff *skb;
	struct crypto_dump_info info;
	int err;

	/* The fixed-size name fields must be NUL-terminated strings. */
	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 0);
	if (!alg)
		return -ENOENT;

	err = -ENOMEM;
	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		goto drop_alg;

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = in_nlh->nlmsg_seq;
	info.nlmsg_flags = 0;

	err = crypto_report_alg(alg, &info);

drop_alg:
	crypto_mod_put(alg);

	if (err) {
		kfree_skb(skb);
		return err;
	}

	return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}
222
/*
 * CRYPTO_MSG_GETALG dump handler.  cb->args[0] stores the list
 * position reached so far, so the dump resumes where the previous skb
 * filled up: -EMSGSIZE stops the pass and the saved position makes the
 * next callback skip already-reported entries.  The walk runs entirely
 * under the crypto_alg_sem read lock.
 */
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
{
	const size_t start_pos = cb->args[0];
	size_t pos = 0;
	struct crypto_dump_info info;
	struct crypto_alg *alg;
	int res;

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	down_read(&crypto_alg_sem);
	list_for_each_entry(alg, &crypto_alg_list, cra_list) {
		if (pos >= start_pos) {
			res = crypto_report_alg(alg, &info);
			/* Out of room: record progress and return what we have. */
			if (res == -EMSGSIZE)
				break;
			if (res)
				goto out;
		}
		pos++;
	}
	cb->args[0] = pos;
	res = skb->len;
out:
	up_read(&crypto_alg_sem);
	return res;
}
253
/* Dump teardown hook: no per-dump state to release. */
static int crypto_dump_report_done(struct netlink_callback *cb)
{
	return 0;
}
258
/*
 * CRYPTO_MSG_UPDATEALG handler: update run-time properties of an
 * algorithm — currently only its priority (CRYPTOCFGA_PRIORITY_VAL).
 * Requires CAP_NET_ADMIN and an exact driver name.
 */
static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
	LIST_HEAD(list);

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* The fixed-size name fields must be NUL-terminated strings. */
	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	/* A priority update only makes sense with an exact driver name. */
	if (priority && !strlen(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	down_write(&crypto_alg_sem);

	/* Tear down instances built on this algorithm.  NOTE(review):
	 * presumably so they get rebuilt against the re-prioritized
	 * algorithm — confirm against crypto_remove_spawns semantics. */
	crypto_remove_spawns(alg, &list, NULL);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	/* Drop the match reference; free removed instances outside the lock. */
	crypto_mod_put(alg);
	crypto_remove_final(&list);

	return 0;
}
294
/*
 * CRYPTO_MSG_DELALG handler: unregister a template-built instance,
 * selected by exact driver name.  Requires CAP_NET_ADMIN.
 */
static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	int err;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* The fixed-size name fields must be NUL-terminated strings. */
	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	alg = crypto_alg_match(p, 1);
	if (!alg)
		return -ENOENT;

	/* We can not unregister core algorithms such as aes-generic.
	 * We would loose the reference in the crypto_alg_list to this algorithm
	 * if we try to unregister. Unregistering such an algorithm without
	 * removing the module is not possible, so we restrict to crypto
	 * instances that are build from templates. */
	err = -EINVAL;
	if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
		goto drop_alg;

	/* Two references are expected: the list's own plus the one taken
	 * by crypto_alg_match() above; any more means the instance is in
	 * use elsewhere and must not be torn down. */
	err = -EBUSY;
	if (refcount_read(&alg->cra_refcnt) > 2)
		goto drop_alg;

	crypto_unregister_instance((struct crypto_instance *)alg);
	err = 0;

drop_alg:
	crypto_mod_put(alg);
	return err;
}
332
/*
 * CRYPTO_MSG_NEWALG handler: look up (and thereby instantiate, via the
 * template machinery behind crypto_alg_mod_lookup) an algorithm that
 * is not yet registered, optionally assigning it a priority.
 * Requires CAP_NET_ADMIN.
 */
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	int exact = 0;
	const char *name;
	struct crypto_alg *alg;
	struct crypto_user_alg *p = nlmsg_data(nlh);
	struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* The fixed-size name fields must be NUL-terminated strings. */
	if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
		return -EINVAL;

	if (strlen(p->cru_driver_name))
		exact = 1;

	/* A priority may only be attached to an exact driver name. */
	if (priority && !exact)
		return -EINVAL;

	/* Refuse to re-add something that already exists; drop the
	 * reference crypto_alg_match() took for us. */
	alg = crypto_alg_match(p, exact);
	if (alg) {
		crypto_mod_put(alg);
		return -EEXIST;
	}

	if (strlen(p->cru_driver_name))
		name = p->cru_driver_name;
	else
		name = p->cru_name;

	alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	down_write(&crypto_alg_sem);

	if (priority)
		alg->cra_priority = nla_get_u32(priority);

	up_write(&crypto_alg_sem);

	/* Drop the lookup reference; the registration keeps its own. */
	crypto_mod_put(alg);

	return 0;
}
380
/* CRYPTO_MSG_DELRNG handler: drop the system default RNG (privileged). */
static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct nlattr **attrs)
{
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;
	return crypto_del_default_rng();
}
388
/*
 * CRYPTO_MSG_GETSTAT handler.  Crypto statistics reporting was
 * removed; the message type is kept so old binaries get a clean error
 * instead of -EINVAL from an unknown type.
 */
static int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
			     struct nlattr **attrs)
{
	/* No longer supported */
	return -ENOTSUPP;
}
395
396#define MSGSIZE(type) sizeof(struct type)
397
398static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
399	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
400	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
401	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
402	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
403	[CRYPTO_MSG_DELRNG	- CRYPTO_MSG_BASE] = 0,
404	[CRYPTO_MSG_GETSTAT	- CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
405};
406
407static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
408	[CRYPTOCFGA_PRIORITY_VAL]   = { .type = NLA_U32},
409};
410
411#undef MSGSIZE
412
413static const struct crypto_link {
414	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
415	int (*dump)(struct sk_buff *, struct netlink_callback *);
416	int (*done)(struct netlink_callback *);
417} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
418	[CRYPTO_MSG_NEWALG	- CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
419	[CRYPTO_MSG_DELALG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
420	[CRYPTO_MSG_UPDATEALG	- CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
421	[CRYPTO_MSG_GETALG	- CRYPTO_MSG_BASE] = { .doit = crypto_report,
422						       .dump = crypto_dump_report,
423						       .done = crypto_dump_report_done},
424	[CRYPTO_MSG_DELRNG	- CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
425	[CRYPTO_MSG_GETSTAT	- CRYPTO_MSG_BASE] = { .doit = crypto_reportstat},
426};
427
/*
 * Demultiplex one crypto netlink message.  GETALG + NLM_F_DUMP starts
 * a netlink dump sized by the current number of registered algorithms
 * (clamped to 64k); all other types are parsed against crypto_policy
 * and dispatched through crypto_dispatch[].  CAP_NET_ADMIN checks live
 * in the individual doit handlers.
 */
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[CRYPTOCFGA_MAX+1];
	const struct crypto_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > CRYPTO_MSG_MAX)
		return -EINVAL;

	/* NOTE(review): no lower-bound check; this relies on the netlink
	 * core filtering out message types below NLMSG_MIN_TYPE
	 * (== CRYPTO_MSG_BASE) before this callback runs — confirm. */
	type -= CRYPTO_MSG_BASE;
	link = &crypto_dispatch[type];

	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP))) {
		struct crypto_alg *alg;
		unsigned long dump_alloc = 0;

		if (link->dump == NULL)
			return -EINVAL;

		/* One max-size report per algorithm, counted under the
		 * list lock, capped at the netlink u16 limit below. */
		down_read(&crypto_alg_sem);
		list_for_each_entry(alg, &crypto_alg_list, cra_list)
			dump_alloc += CRYPTO_REPORT_MAXSIZE;
		up_read(&crypto_alg_sem);

		{
			struct netlink_dump_control c = {
				.dump = link->dump,
				.done = link->done,
				.min_dump_alloc = min(dump_alloc, 65535UL),
			};
			err = netlink_dump_start(net->crypto_nlsk, skb, nlh, &c);
		}

		return err;
	}

	err = nlmsg_parse_deprecated(nlh, crypto_msg_min[type], attrs,
				     CRYPTOCFGA_MAX, crypto_policy, extack);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}
478
/*
 * Netlink input callback: serialize all configuration requests with
 * crypto_cfg_mutex and let netlink_rcv_skb() split the skb into
 * individual messages for crypto_user_rcv_msg().
 */
static void crypto_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&crypto_cfg_mutex);
	netlink_rcv_skb(skb, &crypto_user_rcv_msg);
	mutex_unlock(&crypto_cfg_mutex);
}
485
/* Per-netns init: create this namespace's NETLINK_CRYPTO socket. */
static int __net_init crypto_netlink_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.input	= crypto_netlink_rcv,
	};

	net->crypto_nlsk = netlink_kernel_create(net, NETLINK_CRYPTO, &cfg);
	return net->crypto_nlsk == NULL ? -ENOMEM : 0;
}
495
/* Per-netns teardown: release the socket and clear the stale pointer. */
static void __net_exit crypto_netlink_exit(struct net *net)
{
	netlink_kernel_release(net->crypto_nlsk);
	net->crypto_nlsk = NULL;
}
501
/* Tie socket creation/teardown to network-namespace lifetime. */
static struct pernet_operations crypto_netlink_net_ops = {
	.init = crypto_netlink_init,
	.exit = crypto_netlink_exit,
};
506
/* Module init: register the per-netns netlink socket operations. */
static int __init crypto_user_init(void)
{
	return register_pernet_subsys(&crypto_netlink_net_ops);
}
511
/* Module unload: unregister the per-netns operations (closes all sockets). */
static void __exit crypto_user_exit(void)
{
	unregister_pernet_subsys(&crypto_netlink_net_ops);
}
516
517module_init(crypto_user_init);
518module_exit(crypto_user_exit);
519MODULE_LICENSE("GPL");
520MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
521MODULE_DESCRIPTION("Crypto userspace configuration API");
522MODULE_ALIAS("net-pf-16-proto-21");