1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Crypto user configuration API.
4 *
5 * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com>
6 *
7 */
8
9#include <crypto/algapi.h>
10#include <crypto/internal/cryptouser.h>
11#include <linux/errno.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/string.h>
15#include <net/netlink.h>
16#include <net/sock.h>
17
18#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
19
/* Context for building one netlink stat reply. */
struct crypto_dump_info {
	struct sk_buff *in_skb;		/* request skb; source of the portid */
	struct sk_buff *out_skb;	/* reply skb being filled */
	u32 nlmsg_seq;			/* sequence number echoed in the reply */
	u16 nlmsg_flags;		/* flags for the reply's nlmsghdr */
};
26
27static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
28{
29 struct crypto_stat_cipher rcipher;
30
31 memset(&rcipher, 0, sizeof(rcipher));
32
33 strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
34
35 return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
36}
37
38static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
39{
40 struct crypto_stat_compress rcomp;
41
42 memset(&rcomp, 0, sizeof(rcomp));
43
44 strscpy(rcomp.type, "compression", sizeof(rcomp.type));
45
46 return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
47}
48
49static int crypto_reportstat_one(struct crypto_alg *alg,
50 struct crypto_user_alg *ualg,
51 struct sk_buff *skb)
52{
53 memset(ualg, 0, sizeof(*ualg));
54
55 strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
56 strscpy(ualg->cru_driver_name, alg->cra_driver_name,
57 sizeof(ualg->cru_driver_name));
58 strscpy(ualg->cru_module_name, module_name(alg->cra_module),
59 sizeof(ualg->cru_module_name));
60
61 ualg->cru_type = 0;
62 ualg->cru_mask = 0;
63 ualg->cru_flags = alg->cra_flags;
64 ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
65
66 if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
67 goto nla_put_failure;
68 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
69 struct crypto_stat_larval rl;
70
71 memset(&rl, 0, sizeof(rl));
72 strscpy(rl.type, "larval", sizeof(rl.type));
73 if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
74 goto nla_put_failure;
75 goto out;
76 }
77
78 if (alg->cra_type && alg->cra_type->report_stat) {
79 if (alg->cra_type->report_stat(skb, alg))
80 goto nla_put_failure;
81 goto out;
82 }
83
84 switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
85 case CRYPTO_ALG_TYPE_CIPHER:
86 if (crypto_report_cipher(skb, alg))
87 goto nla_put_failure;
88 break;
89 case CRYPTO_ALG_TYPE_COMPRESS:
90 if (crypto_report_comp(skb, alg))
91 goto nla_put_failure;
92 break;
93 default:
94 pr_err("ERROR: Unhandled alg %d in %s\n",
95 alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
96 __func__);
97 }
98
99out:
100 return 0;
101
102nla_put_failure:
103 return -EMSGSIZE;
104}
105
106static int crypto_reportstat_alg(struct crypto_alg *alg,
107 struct crypto_dump_info *info)
108{
109 struct sk_buff *in_skb = info->in_skb;
110 struct sk_buff *skb = info->out_skb;
111 struct nlmsghdr *nlh;
112 struct crypto_user_alg *ualg;
113 int err = 0;
114
115 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
116 CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
117 if (!nlh) {
118 err = -EMSGSIZE;
119 goto out;
120 }
121
122 ualg = nlmsg_data(nlh);
123
124 err = crypto_reportstat_one(alg, ualg, skb);
125 if (err) {
126 nlmsg_cancel(skb, nlh);
127 goto out;
128 }
129
130 nlmsg_end(skb, nlh);
131
132out:
133 return err;
134}
135
136int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
137 struct nlattr **attrs)
138{
139 struct net *net = sock_net(in_skb->sk);
140 struct crypto_user_alg *p = nlmsg_data(in_nlh);
141 struct crypto_alg *alg;
142 struct sk_buff *skb;
143 struct crypto_dump_info info;
144 int err;
145
146 if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
147 return -EINVAL;
148
149 alg = crypto_alg_match(p, 0);
150 if (!alg)
151 return -ENOENT;
152
153 err = -ENOMEM;
154 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
155 if (!skb)
156 goto drop_alg;
157
158 info.in_skb = in_skb;
159 info.out_skb = skb;
160 info.nlmsg_seq = in_nlh->nlmsg_seq;
161 info.nlmsg_flags = 0;
162
163 err = crypto_reportstat_alg(alg, &info);
164
165drop_alg:
166 crypto_mod_put(alg);
167
168 if (err) {
169 kfree_skb(skb);
170 return err;
171 }
172
173 return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
174}
175
176MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Crypto user configuration API.
4 *
5 * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com>
6 *
7 */
8
9#include <linux/crypto.h>
10#include <linux/cryptouser.h>
11#include <linux/sched.h>
12#include <net/netlink.h>
13#include <net/sock.h>
14#include <crypto/internal/skcipher.h>
15#include <crypto/internal/rng.h>
16#include <crypto/akcipher.h>
17#include <crypto/kpp.h>
18#include <crypto/internal/cryptouser.h>
19
20#include "internal.h"
21
22#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
23
/* Context for building one netlink stat reply. */
struct crypto_dump_info {
	struct sk_buff *in_skb;		/* request skb; source of the portid */
	struct sk_buff *out_skb;	/* reply skb being filled */
	u32 nlmsg_seq;			/* sequence number echoed in the reply */
	u16 nlmsg_flags;		/* flags for the reply's nlmsghdr */
};
30
31static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
32{
33 struct crypto_stat_aead raead;
34
35 memset(&raead, 0, sizeof(raead));
36
37 strscpy(raead.type, "aead", sizeof(raead.type));
38
39 raead.stat_encrypt_cnt = atomic64_read(&alg->stats.aead.encrypt_cnt);
40 raead.stat_encrypt_tlen = atomic64_read(&alg->stats.aead.encrypt_tlen);
41 raead.stat_decrypt_cnt = atomic64_read(&alg->stats.aead.decrypt_cnt);
42 raead.stat_decrypt_tlen = atomic64_read(&alg->stats.aead.decrypt_tlen);
43 raead.stat_err_cnt = atomic64_read(&alg->stats.aead.err_cnt);
44
45 return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
46}
47
48static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
49{
50 struct crypto_stat_cipher rcipher;
51
52 memset(&rcipher, 0, sizeof(rcipher));
53
54 strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
55
56 rcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.cipher.encrypt_cnt);
57 rcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.cipher.encrypt_tlen);
58 rcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.cipher.decrypt_cnt);
59 rcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.cipher.decrypt_tlen);
60 rcipher.stat_err_cnt = atomic64_read(&alg->stats.cipher.err_cnt);
61
62 return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
63}
64
65static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
66{
67 struct crypto_stat_compress rcomp;
68
69 memset(&rcomp, 0, sizeof(rcomp));
70
71 strscpy(rcomp.type, "compression", sizeof(rcomp.type));
72 rcomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
73 rcomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
74 rcomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
75 rcomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
76 rcomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
77
78 return nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, sizeof(rcomp), &rcomp);
79}
80
81static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
82{
83 struct crypto_stat_compress racomp;
84
85 memset(&racomp, 0, sizeof(racomp));
86
87 strscpy(racomp.type, "acomp", sizeof(racomp.type));
88 racomp.stat_compress_cnt = atomic64_read(&alg->stats.compress.compress_cnt);
89 racomp.stat_compress_tlen = atomic64_read(&alg->stats.compress.compress_tlen);
90 racomp.stat_decompress_cnt = atomic64_read(&alg->stats.compress.decompress_cnt);
91 racomp.stat_decompress_tlen = atomic64_read(&alg->stats.compress.decompress_tlen);
92 racomp.stat_err_cnt = atomic64_read(&alg->stats.compress.err_cnt);
93
94 return nla_put(skb, CRYPTOCFGA_STAT_ACOMP, sizeof(racomp), &racomp);
95}
96
97static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
98{
99 struct crypto_stat_akcipher rakcipher;
100
101 memset(&rakcipher, 0, sizeof(rakcipher));
102
103 strscpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
104 rakcipher.stat_encrypt_cnt = atomic64_read(&alg->stats.akcipher.encrypt_cnt);
105 rakcipher.stat_encrypt_tlen = atomic64_read(&alg->stats.akcipher.encrypt_tlen);
106 rakcipher.stat_decrypt_cnt = atomic64_read(&alg->stats.akcipher.decrypt_cnt);
107 rakcipher.stat_decrypt_tlen = atomic64_read(&alg->stats.akcipher.decrypt_tlen);
108 rakcipher.stat_sign_cnt = atomic64_read(&alg->stats.akcipher.sign_cnt);
109 rakcipher.stat_verify_cnt = atomic64_read(&alg->stats.akcipher.verify_cnt);
110 rakcipher.stat_err_cnt = atomic64_read(&alg->stats.akcipher.err_cnt);
111
112 return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
113 sizeof(rakcipher), &rakcipher);
114}
115
116static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
117{
118 struct crypto_stat_kpp rkpp;
119
120 memset(&rkpp, 0, sizeof(rkpp));
121
122 strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
123
124 rkpp.stat_setsecret_cnt = atomic64_read(&alg->stats.kpp.setsecret_cnt);
125 rkpp.stat_generate_public_key_cnt = atomic64_read(&alg->stats.kpp.generate_public_key_cnt);
126 rkpp.stat_compute_shared_secret_cnt = atomic64_read(&alg->stats.kpp.compute_shared_secret_cnt);
127 rkpp.stat_err_cnt = atomic64_read(&alg->stats.kpp.err_cnt);
128
129 return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
130}
131
132static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
133{
134 struct crypto_stat_hash rhash;
135
136 memset(&rhash, 0, sizeof(rhash));
137
138 strscpy(rhash.type, "ahash", sizeof(rhash.type));
139
140 rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
141 rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
142 rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
143
144 return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
145}
146
147static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
148{
149 struct crypto_stat_hash rhash;
150
151 memset(&rhash, 0, sizeof(rhash));
152
153 strscpy(rhash.type, "shash", sizeof(rhash.type));
154
155 rhash.stat_hash_cnt = atomic64_read(&alg->stats.hash.hash_cnt);
156 rhash.stat_hash_tlen = atomic64_read(&alg->stats.hash.hash_tlen);
157 rhash.stat_err_cnt = atomic64_read(&alg->stats.hash.err_cnt);
158
159 return nla_put(skb, CRYPTOCFGA_STAT_HASH, sizeof(rhash), &rhash);
160}
161
162static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
163{
164 struct crypto_stat_rng rrng;
165
166 memset(&rrng, 0, sizeof(rrng));
167
168 strscpy(rrng.type, "rng", sizeof(rrng.type));
169
170 rrng.stat_generate_cnt = atomic64_read(&alg->stats.rng.generate_cnt);
171 rrng.stat_generate_tlen = atomic64_read(&alg->stats.rng.generate_tlen);
172 rrng.stat_seed_cnt = atomic64_read(&alg->stats.rng.seed_cnt);
173 rrng.stat_err_cnt = atomic64_read(&alg->stats.rng.err_cnt);
174
175 return nla_put(skb, CRYPTOCFGA_STAT_RNG, sizeof(rrng), &rrng);
176}
177
178static int crypto_reportstat_one(struct crypto_alg *alg,
179 struct crypto_user_alg *ualg,
180 struct sk_buff *skb)
181{
182 memset(ualg, 0, sizeof(*ualg));
183
184 strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
185 strscpy(ualg->cru_driver_name, alg->cra_driver_name,
186 sizeof(ualg->cru_driver_name));
187 strscpy(ualg->cru_module_name, module_name(alg->cra_module),
188 sizeof(ualg->cru_module_name));
189
190 ualg->cru_type = 0;
191 ualg->cru_mask = 0;
192 ualg->cru_flags = alg->cra_flags;
193 ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
194
195 if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
196 goto nla_put_failure;
197 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
198 struct crypto_stat_larval rl;
199
200 memset(&rl, 0, sizeof(rl));
201 strscpy(rl.type, "larval", sizeof(rl.type));
202 if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, sizeof(rl), &rl))
203 goto nla_put_failure;
204 goto out;
205 }
206
207 switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
208 case CRYPTO_ALG_TYPE_AEAD:
209 if (crypto_report_aead(skb, alg))
210 goto nla_put_failure;
211 break;
212 case CRYPTO_ALG_TYPE_SKCIPHER:
213 if (crypto_report_cipher(skb, alg))
214 goto nla_put_failure;
215 break;
216 case CRYPTO_ALG_TYPE_BLKCIPHER:
217 if (crypto_report_cipher(skb, alg))
218 goto nla_put_failure;
219 break;
220 case CRYPTO_ALG_TYPE_CIPHER:
221 if (crypto_report_cipher(skb, alg))
222 goto nla_put_failure;
223 break;
224 case CRYPTO_ALG_TYPE_COMPRESS:
225 if (crypto_report_comp(skb, alg))
226 goto nla_put_failure;
227 break;
228 case CRYPTO_ALG_TYPE_ACOMPRESS:
229 if (crypto_report_acomp(skb, alg))
230 goto nla_put_failure;
231 break;
232 case CRYPTO_ALG_TYPE_SCOMPRESS:
233 if (crypto_report_acomp(skb, alg))
234 goto nla_put_failure;
235 break;
236 case CRYPTO_ALG_TYPE_AKCIPHER:
237 if (crypto_report_akcipher(skb, alg))
238 goto nla_put_failure;
239 break;
240 case CRYPTO_ALG_TYPE_KPP:
241 if (crypto_report_kpp(skb, alg))
242 goto nla_put_failure;
243 break;
244 case CRYPTO_ALG_TYPE_AHASH:
245 if (crypto_report_ahash(skb, alg))
246 goto nla_put_failure;
247 break;
248 case CRYPTO_ALG_TYPE_HASH:
249 if (crypto_report_shash(skb, alg))
250 goto nla_put_failure;
251 break;
252 case CRYPTO_ALG_TYPE_RNG:
253 if (crypto_report_rng(skb, alg))
254 goto nla_put_failure;
255 break;
256 default:
257 pr_err("ERROR: Unhandled alg %d in %s\n",
258 alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
259 __func__);
260 }
261
262out:
263 return 0;
264
265nla_put_failure:
266 return -EMSGSIZE;
267}
268
269static int crypto_reportstat_alg(struct crypto_alg *alg,
270 struct crypto_dump_info *info)
271{
272 struct sk_buff *in_skb = info->in_skb;
273 struct sk_buff *skb = info->out_skb;
274 struct nlmsghdr *nlh;
275 struct crypto_user_alg *ualg;
276 int err = 0;
277
278 nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
279 CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
280 if (!nlh) {
281 err = -EMSGSIZE;
282 goto out;
283 }
284
285 ualg = nlmsg_data(nlh);
286
287 err = crypto_reportstat_one(alg, ualg, skb);
288 if (err) {
289 nlmsg_cancel(skb, nlh);
290 goto out;
291 }
292
293 nlmsg_end(skb, nlh);
294
295out:
296 return err;
297}
298
299int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
300 struct nlattr **attrs)
301{
302 struct net *net = sock_net(in_skb->sk);
303 struct crypto_user_alg *p = nlmsg_data(in_nlh);
304 struct crypto_alg *alg;
305 struct sk_buff *skb;
306 struct crypto_dump_info info;
307 int err;
308
309 if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
310 return -EINVAL;
311
312 alg = crypto_alg_match(p, 0);
313 if (!alg)
314 return -ENOENT;
315
316 err = -ENOMEM;
317 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
318 if (!skb)
319 goto drop_alg;
320
321 info.in_skb = in_skb;
322 info.out_skb = skb;
323 info.nlmsg_seq = in_nlh->nlmsg_seq;
324 info.nlmsg_flags = 0;
325
326 err = crypto_reportstat_alg(alg, &info);
327
328drop_alg:
329 crypto_mod_put(alg);
330
331 if (err)
332 return err;
333
334 return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
335}
336
337MODULE_LICENSE("GPL");