// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 *
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>

/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)

/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
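/*
 * Field meanings, as this driver uses them in aes_set_key() below:
 * rounds is the AES round count (10/12/14), algo selects the cipher,
 * keygen=0 lets the hardware expand a 128-bit user key itself while
 * keygen=1 means software supplies the full schedule, encdec=1 selects
 * decryption, and ksize encodes the key size (0=128, 1=192, 2=256
 * bits).  See VIA's PadLock programming guide for the authoritative
 * control word layout.
 */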

/*
 * Whenever you change the following structure, *make sure* E, d_data
 * and cword stay aligned on 16-byte boundaries, and that the hardware
 * can access 16 * 16 bytes of E and d_data (only the first 15 * 16
 * bytes matter, but the hardware reads more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};

static DEFINE_PER_CPU(struct cword *, paes_last_cword);

/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}

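/*
 * The crypto API only guarantees crypto_tfm_ctx_alignment() for the
 * context it hands us; when PADLOCK_ALIGNMENT is stricter than that,
 * round the pointer up by hand so E, d_data and cword keep their
 * 16-byte alignment.
 */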
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}

static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
{
	return aes_ctx_common(crypto_skcipher_ctx(tfm));
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8)
		return -EINVAL;

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;
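	/*
	 * Worked example: key_len 16 -> rounds 10, ksize 0;
	 * key_len 24 -> rounds 12, ksize 1; key_len 32 -> rounds 14,
	 * ksize 2, matching the standard AES round counts.
	 */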

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (aes_expandkey(&gen_aes, in_key, key_len))
		return -EINVAL;

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}

static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
				unsigned int key_len)
{
	return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
}

/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

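	/*
	 * Writing EFLAGS (pushf/popf below) forces the xcrypt insns to
	 * reload the key material on their next use.  Only do so when
	 * the last control word used on this CPU belonged to a
	 * different transform.
	 */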
	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}

/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'.  Fortunately,
 * the kernel doesn't use CR0.TS.
 */

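/*
 * Register contract for the raw opcodes below: ESI = source, EDI =
 * destination, EDX = control word, EBX = key schedule, ECX = block
 * count (EAX additionally carries the IV pointer for the CBC variant).
 * The instructions are emitted as raw .byte sequences so no assembler
 * support for the xcrypt mnemonics is needed.
 */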
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
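	/*
	 * Sizing note: ecb_crypt() only takes this path when
	 * count < ecb_fetch_blocks, so at most MAX_ECB_FETCH_BLOCKS - 1
	 * blocks ever need copying; the prefetcher's overshoot past the
	 * buffer lands in stack that is already mapped (see above).
	 */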
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}

static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}

static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}

static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}

static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);
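	/*
	 * ecb_fetch_blocks is a power of two, so initial is
	 * count % ecb_fetch_blocks.  Running the remainder first keeps
	 * the main rep a whole multiple of the fetch size, and any
	 * prefetch overshoot during the short run lands in data still
	 * to be processed rather than past the end of the buffer.
	 */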

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}

static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);
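	/* Same remainder-first trick as padlock_xcrypt_ecb(), using the
	 * CBC fetch size. */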

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}

static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= padlock_aes_encrypt,
			.cia_decrypt		= padlock_aes_decrypt,
		}
	}
};

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

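	/*
	 * skcipher_walk hands back one virtually mapped span at a time
	 * in walk.nbytes.  Process the whole blocks it contains and
	 * report any remainder back via skcipher_walk_done().
	 */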
	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.decrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-padlock",
	.base.cra_priority	= PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aes_ctx),
	.base.cra_alignmask	= PADLOCK_ALIGNMENT - 1,
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= aes_set_key_skcipher,
	.encrypt		= ecb_aes_encrypt,
	.decrypt		= ecb_aes_decrypt,
};

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
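		/*
		 * xcryptcbc leaves EAX pointing at the last ciphertext
		 * block written, which is the chaining value for the
		 * next span; copy it back into walk.iv.
		 */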
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-padlock",
	.base.cra_priority	= PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aes_ctx),
	.base.cra_alignmask	= PADLOCK_ALIGNMENT - 1,
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aes_set_key_skcipher,
	.encrypt		= cbc_aes_encrypt,
	.decrypt		= cbc_aes_decrypt,
};
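/*
 * Usage sketch (not part of this driver): callers reach these
 * implementations through the generic crypto API, e.g.
 * crypto_alloc_skcipher("cbc(aes)", 0, 0); the elevated cra_priority
 * makes "cbc-aes-padlock" win over aes-generic on CPUs with ACE.
 */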

static const struct x86_cpu_id padlock_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);

static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)) != 0)
		goto aes_err;

	if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
		goto ecb_aes_err;

	if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

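	/*
	 * Family 6, model 15, stepping 2 is the VIA Nano stepping whose
	 * xcrypt prefetcher reads further ahead; raising the assumed
	 * fetch sizes makes the copy/remainder paths above account for
	 * the larger overshoot.
	 */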
	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}

static void __exit padlock_fini(void)
{
	crypto_unregister_skcipher(&cbc_aes_alg);
	crypto_unregister_skcipher(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("aes");