// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc. */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
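/* Serializes all access to the single AES engine (key, IV and control regs) */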
static DEFINE_SPINLOCK(lock);

/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, const void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

static int
do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

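	/*
	 * The engine DMAs to/from physical addresses, so src and dst must
	 * lie in directly-mapped lowmem for virt_to_phys() to be valid.
	 */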
	iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

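	/*
	 * Busy-wait for the completion bit instead of taking an IRQ;
	 * the spin is bounded by AES_OP_TIMEOUT iterations.
	 */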
	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
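	/* 0 on success, nonzero if we timed out waiting for completion */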
	return counter ? 0 : 1;
}

static void
geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
		void *dst, u32 len, u8 *iv, int mode, int dir)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	/*
	 * The source and destination may be the same buffer (in-place
	 * operation), so always enable cache coherency on both the
	 * source and destination DMA channels.
	 */
	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */

	spin_lock_irqsave(&lock, iflags);

	if (mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, iv);
	}

	flags |= AES_CTRL_WRKEY;
	_writefield(AES_WRITEKEY0_REG, tctx->key);

	ret = do_crypt(src, dst, len, flags);
	BUG_ON(ret);

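	/*
	 * The engine advances its IV register as it chains blocks; read
	 * the final IV back so the caller can continue the CBC stream.
	 */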
	if (mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, iv);

	spin_unlock_irqrestore(&lock, iflags);
}

/* CRYPTO-API Functions */

static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int len)
{
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	tctx->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(tctx->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
		/* not supported at all */
		return -EINVAL;

	/*
	 * The requested key size is not supported by the hardware;
	 * use the software fallback cipher instead.
	 */
	tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	tctx->fallback.cip->base.crt_flags |=
		(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(tctx->fallback.cip, key, len);
}

static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	tctx->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(tctx->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
		/* not supported at all */
		return -EINVAL;

	/*
	 * The requested key size is not supported by the hardware;
	 * use the software fallback skcipher instead.
	 */
	crypto_skcipher_clear_flags(tctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(tctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}

static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
		return;
	}

	geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
			AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
		return;
	}

	geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
			AES_MODE_ECB, AES_DIR_DECRYPT);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	tctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(tctx->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(tctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->fallback.cip);
}

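/*
 * Single-block AES cipher. Only AES-128 runs on the hardware; larger
 * keys are handled entirely by the fallback allocated in
 * fallback_init_cip().
 */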
static struct crypto_alg geode_alg = {
	.cra_name = "aes",
	.cra_driver_name = "geode-aes",
	.cra_priority = 300,
	.cra_alignmask = 15,
	.cra_flags = CRYPTO_ALG_TYPE_CIPHER |
		     CRYPTO_ALG_NEED_FALLBACK,
	.cra_init = fallback_init_cip,
	.cra_exit = fallback_exit_cip,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
	.cra_module = THIS_MODULE,
	.cra_u = {
		.cipher = {
			.cia_min_keysize = AES_MIN_KEY_SIZE,
			.cia_max_keysize = AES_MAX_KEY_SIZE,
			.cia_setkey = geode_setkey_cip,
			.cia_encrypt = geode_encrypt,
			.cia_decrypt = geode_decrypt
		}
	}
};

static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	tctx->fallback.skcipher =
		crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(tctx->fallback.skcipher)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(tctx->fallback.skcipher);
	}

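	/*
	 * Reserve room in our request context for a nested fallback
	 * request, so geode_skcipher_crypt() can re-dispatch oversized
	 * keys without allocating.
	 */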
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(tctx->fallback.skcipher));
	return 0;
}

static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(tctx->fallback.skcipher);
}

static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

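		/*
		 * The request context was sized in geode_init_skcipher()
		 * to hold the fallback's request, so the whole operation
		 * can be delegated in place.
		 */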
		*subreq = *req;
		skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
		if (dir == AES_DIR_DECRYPT)
			return crypto_skcipher_decrypt(subreq);
		else
			return crypto_skcipher_encrypt(subreq);
	}

	err = skcipher_walk_virt(&walk, req, false);

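	/*
	 * Hand full blocks to the engine; skcipher_walk_done() is told
	 * how many tail bytes remain so the walk can present them again.
	 */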
	while ((nbytes = walk.nbytes) != 0) {
		geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
				round_down(nbytes, AES_BLOCK_SIZE),
				walk.iv, mode, dir);
		err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
	}

	return err;
}

static int geode_cbc_encrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}

static int geode_cbc_decrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}

static int geode_ecb_encrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static int geode_ecb_decrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}

static struct skcipher_alg geode_skcipher_algs[] = {
	{
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "cbc-aes-geode",
		.base.cra_priority = 400,
		.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
		.base.cra_alignmask = 15,
		.base.cra_module = THIS_MODULE,
		.init = geode_init_skcipher,
		.exit = geode_exit_skcipher,
		.setkey = geode_setkey_skcipher,
		.encrypt = geode_cbc_encrypt,
		.decrypt = geode_cbc_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base.cra_name = "ecb(aes)",
		.base.cra_driver_name = "ecb-aes-geode",
		.base.cra_priority = 400,
		.base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
		.base.cra_alignmask = 15,
		.base.cra_module = THIS_MODULE,
		.init = geode_init_skcipher,
		.exit = geode_exit_skcipher,
		.setkey = geode_setkey_skcipher,
		.encrypt = geode_ecb_encrypt,
		.decrypt = geode_ecb_decrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
	},
};

static void geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_skciphers(geode_skcipher_algs,
				    ARRAY_SIZE(geode_skcipher_algs));

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}

static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

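	/* Map all of BAR 0 (a maxlen of 0 means "the whole region") */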
	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_skciphers(geode_skcipher_algs,
					ARRAY_SIZE(geode_skcipher_algs));
	if (ret)
		goto ealg;

	dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
	return 0;

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	dev_err(&dev->dev, "GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);