v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static DEFINE_SPINLOCK(lock);

/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, const void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

static int
do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
        u32 status;
        u32 counter = AES_OP_TIMEOUT;

        iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
        iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
        iowrite32(len, _iobase + AES_LENA_REG);

        /* Start the operation */
        iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

        do {
                status = ioread32(_iobase + AES_INTR_REG);
                cpu_relax();
        } while (!(status & AES_INTRA_PENDING) && --counter);

        /* Clear the event */
        iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
        return counter ? 0 : 1;
}
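
/*
 * Note: do_crypt() hands the engine physical addresses obtained with
 * virt_to_phys(), so src and dst must live in the kernel linear mapping
 * (no vmalloc or highmem buffers).  Completion is detected by polling
 * the interrupt-pending bit up to AES_OP_TIMEOUT times; the function
 * returns 0 on completion and 1 on timeout, which the caller below
 * treats as fatal via BUG_ON().
 */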

static void
geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
                void *dst, u32 len, u8 *iv, int mode, int dir)
{
        u32 flags = 0;
        unsigned long iflags;
        int ret;

        /* If the source and destination are the same, then
         * we need to turn on the coherent flags; otherwise
         * we don't need to worry.
         */

        flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

        if (dir == AES_DIR_ENCRYPT)
                flags |= AES_CTRL_ENCRYPT;

        /* Start the critical section */

        spin_lock_irqsave(&lock, iflags);

        if (mode == AES_MODE_CBC) {
                flags |= AES_CTRL_CBC;
                _writefield(AES_WRITEIV0_REG, iv);
        }

        flags |= AES_CTRL_WRKEY;
        _writefield(AES_WRITEKEY0_REG, tctx->key);

        ret = do_crypt(src, dst, len, flags);
        BUG_ON(ret);

        if (mode == AES_MODE_CBC)
                _readfield(AES_WRITEIV0_REG, iv);

        spin_unlock_irqrestore(&lock, iflags);
}

/* CRYPTO-API Functions */

static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        tctx->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(tctx->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
                /* not supported at all */
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, so use a
         * software fallback.
         */
        tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        tctx->fallback.cip->base.crt_flags |=
                (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

        return crypto_cipher_setkey(tctx->fallback.cip, key, len);
}

static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
                                 unsigned int len)
{
        struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

        tctx->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(tctx->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
                /* not supported at all */
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, so use a
         * software fallback.
         */
        crypto_skcipher_clear_flags(tctx->fallback.skcipher,
                                    CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(tctx->fallback.skcipher,
                                  crypto_skcipher_get_flags(tfm) &
                                  CRYPTO_TFM_REQ_MASK);
        return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}

static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
                return;
        }

        geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
                        AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
                return;
        }

        geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
                        AES_MODE_ECB, AES_DIR_DECRYPT);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        tctx->fallback.cip = crypto_alloc_cipher(name, 0,
                                                 CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(tctx->fallback.cip)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(tctx->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

        crypto_free_cipher(tctx->fallback.cip);
}

static struct crypto_alg geode_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "geode-aes",
        .cra_priority           = 300,
        .cra_alignmask          = 15,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_init               = fallback_init_cip,
        .cra_exit               = fallback_exit_cip,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct geode_aes_tfm_ctx),
        .cra_module             = THIS_MODULE,
        .cra_u                  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = geode_setkey_cip,
                        .cia_encrypt            = geode_encrypt,
                        .cia_decrypt            = geode_decrypt
                }
        }
};
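
/*
 * A minimal sketch (not part of the driver) of exercising the
 * single-block "aes" cipher registered above from another module.  The
 * helper name geode_test_one_block() is an illustrative assumption; on
 * v6.8 the crypto_cipher interface is internal, so a caller would need
 * MODULE_IMPORT_NS(CRYPTO_INTERNAL) and <crypto/internal/cipher.h>,
 * just as this file does.
 */
#if 0
static int geode_test_one_block(void)
{
        u8 key[AES_KEYSIZE_128] = { 0 };
        u8 in[AES_BLOCK_SIZE] = { 0 }, out[AES_BLOCK_SIZE];
        struct crypto_cipher *cip;
        int ret;

        /* "aes" resolves to the highest-priority implementation;
         * geode-aes registers itself with priority 300. */
        cip = crypto_alloc_cipher("aes", 0, 0);
        if (IS_ERR(cip))
                return PTR_ERR(cip);

        ret = crypto_cipher_setkey(cip, key, sizeof(key));
        if (!ret)
                crypto_cipher_encrypt_one(cip, out, in);

        crypto_free_cipher(cip);
        return ret;
}
#endif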

static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
        const char *name = crypto_tfm_alg_name(&tfm->base);
        struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

        tctx->fallback.skcipher =
                crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(tctx->fallback.skcipher)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(tctx->fallback.skcipher);
        }

        crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
                                    crypto_skcipher_reqsize(tctx->fallback.skcipher));
        return 0;
}

static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
        struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(tctx->fallback.skcipher);
}

static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
        struct skcipher_walk walk;
        unsigned int nbytes;
        int err;

        if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
                struct skcipher_request *subreq = skcipher_request_ctx(req);

                *subreq = *req;
                skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
                if (dir == AES_DIR_DECRYPT)
                        return crypto_skcipher_decrypt(subreq);
                else
                        return crypto_skcipher_encrypt(subreq);
        }

        err = skcipher_walk_virt(&walk, req, false);

        while ((nbytes = walk.nbytes) != 0) {
                geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
                                round_down(nbytes, AES_BLOCK_SIZE),
                                walk.iv, mode, dir);
                err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
        }

        return err;
}
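
/*
 * Note: each skcipher_walk step hands geode_skcipher_crypt() a virtually
 * contiguous chunk; only whole AES blocks are pushed to the engine, and
 * the sub-block remainder (nbytes % AES_BLOCK_SIZE) is returned to
 * skcipher_walk_done() so the walk can present it again on the next
 * iteration.
 */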

static int geode_cbc_encrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}

static int geode_cbc_decrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}

static int geode_ecb_encrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static int geode_ecb_decrypt(struct skcipher_request *req)
{
        return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}

static struct skcipher_alg geode_skcipher_algs[] = {
        {
                .base.cra_name          = "cbc(aes)",
                .base.cra_driver_name   = "cbc-aes-geode",
                .base.cra_priority      = 400,
                .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize     = AES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct geode_aes_tfm_ctx),
                .base.cra_alignmask     = 15,
                .base.cra_module        = THIS_MODULE,
                .init                   = geode_init_skcipher,
                .exit                   = geode_exit_skcipher,
                .setkey                 = geode_setkey_skcipher,
                .encrypt                = geode_cbc_encrypt,
                .decrypt                = geode_cbc_decrypt,
                .min_keysize            = AES_MIN_KEY_SIZE,
                .max_keysize            = AES_MAX_KEY_SIZE,
                .ivsize                 = AES_BLOCK_SIZE,
        }, {
                .base.cra_name          = "ecb(aes)",
                .base.cra_driver_name   = "ecb-aes-geode",
                .base.cra_priority      = 400,
                .base.cra_flags         = CRYPTO_ALG_KERN_DRIVER_ONLY |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .base.cra_blocksize     = AES_BLOCK_SIZE,
                .base.cra_ctxsize       = sizeof(struct geode_aes_tfm_ctx),
                .base.cra_alignmask     = 15,
                .base.cra_module        = THIS_MODULE,
                .init                   = geode_init_skcipher,
                .exit                   = geode_exit_skcipher,
                .setkey                 = geode_setkey_skcipher,
                .encrypt                = geode_ecb_encrypt,
                .decrypt                = geode_ecb_decrypt,
                .min_keysize            = AES_MIN_KEY_SIZE,
                .max_keysize            = AES_MAX_KEY_SIZE,
        },
};
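
/*
 * A minimal sketch (not part of the driver) of how another kernel module
 * might drive the "cbc(aes)" skcipher registered above.  The helper name
 * geode_test_cbc() is an illustrative assumption, and it presumes
 * <crypto/skcipher.h>, <linux/scatterlist.h> and <linux/slab.h> are
 * available; the crypto API calls themselves are standard.  The engine
 * DMAs to physical addresses, so heap buffers are used rather than stack
 * memory.
 */
#if 0
static int geode_test_cbc(void)
{
        u8 key[AES_KEYSIZE_128] = { 0 };
        u8 iv[AES_BLOCK_SIZE] = { 0 };
        u8 *buf;
        struct crypto_skcipher *tfm;
        struct skcipher_request *req = NULL;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!buf || !req) {
                ret = -ENOMEM;
                goto out;
        }

        ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
        if (ret)
                goto out;

        sg_init_one(&sg, buf, AES_BLOCK_SIZE);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

        /* Wait synchronously; geode-aes completes requests in-line. */
        ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
        skcipher_request_free(req);
        kfree(buf);
        crypto_free_skcipher(tfm);
        return ret;
}
#endif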

static void geode_aes_remove(struct pci_dev *dev)
{
        crypto_unregister_alg(&geode_alg);
        crypto_unregister_skciphers(geode_skcipher_algs,
                                    ARRAY_SIZE(geode_skcipher_algs));

        pci_iounmap(dev, _iobase);
        _iobase = NULL;

        pci_release_regions(dev);
        pci_disable_device(dev);
}


static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        int ret;

        ret = pci_enable_device(dev);
        if (ret)
                return ret;

        ret = pci_request_regions(dev, "geode-aes");
        if (ret)
                goto eenable;

        _iobase = pci_iomap(dev, 0, 0);

        if (_iobase == NULL) {
                ret = -ENOMEM;
                goto erequest;
        }

        /* Clear any pending activity */
        iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

        ret = crypto_register_alg(&geode_alg);
        if (ret)
                goto eiomap;

        ret = crypto_register_skciphers(geode_skcipher_algs,
                                        ARRAY_SIZE(geode_skcipher_algs));
        if (ret)
                goto ealg;

        dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
        return 0;

 ealg:
        crypto_unregister_alg(&geode_alg);

 eiomap:
        pci_iounmap(dev, _iobase);

 erequest:
        pci_release_regions(dev);

 eenable:
        pci_disable_device(dev);

        dev_err(&dev->dev, "GEODE AES initialization failed.\n");
        return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
        .name = "Geode LX AES",
        .id_table = geode_aes_tbl,
        .probe = geode_aes_probe,
        .remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
v4.17
 
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128 bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128 bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
        int i;

        for (i = 0; i < 4; i++)
                ((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

static int
do_crypt(void *src, void *dst, int len, u32 flags)
{
        u32 status;
        u32 counter = AES_OP_TIMEOUT;

        iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
        iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
        iowrite32(len, _iobase + AES_LENA_REG);

        /* Start the operation */
        iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

        do {
                status = ioread32(_iobase + AES_INTR_REG);
                cpu_relax();
        } while (!(status & AES_INTRA_PENDING) && --counter);

        /* Clear the event */
        iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
        return counter ? 0 : 1;
}

static unsigned int
geode_aes_crypt(struct geode_aes_op *op)
{
        u32 flags = 0;
        unsigned long iflags;
        int ret;

        if (op->len == 0)
                return 0;

        /* If the source and destination are the same, then
         * we need to turn on the coherent flags; otherwise
         * we don't need to worry.
         */

        flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

        if (op->dir == AES_DIR_ENCRYPT)
                flags |= AES_CTRL_ENCRYPT;

        /* Start the critical section */

        spin_lock_irqsave(&lock, iflags);

        if (op->mode == AES_MODE_CBC) {
                flags |= AES_CTRL_CBC;
                _writefield(AES_WRITEIV0_REG, op->iv);
        }

        if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
                flags |= AES_CTRL_WRKEY;
                _writefield(AES_WRITEKEY0_REG, op->key);
        }

        ret = do_crypt(op->src, op->dst, op->len, flags);
        BUG_ON(ret);

        if (op->mode == AES_MODE_CBC)
                _readfield(AES_WRITEIV0_REG, op->iv);

        spin_unlock_irqrestore(&lock, iflags);

        return op->len;
}
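
/*
 * Note: unlike the v6.8 version of geode_aes_crypt() above, this variant
 * passes all parameters through a struct geode_aes_op, skips writing the
 * key when AES_FLAGS_HIDDENKEY is set, and returns the number of bytes
 * processed instead of void.
 */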

/* CRYPTO-API Functions */

static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);
        unsigned int ret;

        op->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(op->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
                /* not supported at all */
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /*
         * The requested key size is not supported by HW, so use a
         * software fallback.
         */
        op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_cipher_setkey(op->fallback.cip, key, len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
                unsigned int len)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);
        unsigned int ret;

        op->keylen = len;

        if (len == AES_KEYSIZE_128) {
                memcpy(op->key, key, len);
                return 0;
        }

        if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
                /* not supported at all */
                tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
                return -EINVAL;
        }

        /*
         * The requested key size is not supported by HW, so use a
         * software fallback.
         */
        op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
        op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

        ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
        if (ret) {
                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
        }
        return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm;
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

        tfm = desc->tfm;
        desc->tfm = op->fallback.blk;

        ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
                struct scatterlist *dst, struct scatterlist *src,
                unsigned int nbytes)
{
        unsigned int ret;
        struct crypto_blkcipher *tfm;
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);

        tfm = desc->tfm;
        desc->tfm = op->fallback.blk;

        ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);

        desc->tfm = tfm;
        return ret;
}

static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        if (unlikely(op->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_encrypt_one(op->fallback.cip, out, in);
                return;
        }

        op->src = (void *) in;
        op->dst = (void *) out;
        op->mode = AES_MODE_ECB;
        op->flags = 0;
        op->len = AES_BLOCK_SIZE;
        op->dir = AES_DIR_ENCRYPT;

        geode_aes_crypt(op);
}


static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        if (unlikely(op->keylen != AES_KEYSIZE_128)) {
                crypto_cipher_decrypt_one(op->fallback.cip, out, in);
                return;
        }

        op->src = (void *) in;
        op->dst = (void *) out;
        op->mode = AES_MODE_ECB;
        op->flags = 0;
        op->len = AES_BLOCK_SIZE;
        op->dir = AES_DIR_DECRYPT;

        geode_aes_crypt(op);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        op->fallback.cip = crypto_alloc_cipher(name, 0,
                                CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(op->fallback.cip)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(op->fallback.cip);
        }

        return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        crypto_free_cipher(op->fallback.cip);
        op->fallback.cip = NULL;
}

static struct crypto_alg geode_alg = {
        .cra_name               = "aes",
        .cra_driver_name        = "geode-aes",
        .cra_priority           = 300,
        .cra_alignmask          = 15,
        .cra_flags              = CRYPTO_ALG_TYPE_CIPHER |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_init               = fallback_init_cip,
        .cra_exit               = fallback_exit_cip,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct geode_aes_op),
        .cra_module             = THIS_MODULE,
        .cra_u                  = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
                        .cia_setkey             = geode_setkey_cip,
                        .cia_encrypt            = geode_encrypt,
                        .cia_decrypt            = geode_decrypt
                }
        }
};

static int
geode_cbc_decrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        op->iv = walk.iv;

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr,
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_CBC;
                op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
                op->dir = AES_DIR_DECRYPT;

                ret = geode_aes_crypt(op);

                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int
geode_cbc_encrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);
        op->iv = walk.iv;

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr,
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_CBC;
                op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
                op->dir = AES_DIR_ENCRYPT;

                ret = geode_aes_crypt(op);
                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        op->fallback.blk = crypto_alloc_blkcipher(name, 0,
                        CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);

        if (IS_ERR(op->fallback.blk)) {
                printk(KERN_ERR "Error allocating fallback algo %s\n", name);
                return PTR_ERR(op->fallback.blk);
        }

        return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
        struct geode_aes_op *op = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(op->fallback.blk);
        op->fallback.blk = NULL;
}

static struct crypto_alg geode_cbc_alg = {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-geode",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_init               = fallback_init_blk,
        .cra_exit               = fallback_exit_blk,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct geode_aes_op),
        .cra_alignmask          = 15,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = geode_setkey_blk,
                        .encrypt        = geode_cbc_encrypt,
                        .decrypt        = geode_cbc_decrypt,
                        .ivsize         = AES_BLOCK_SIZE,
                }
        }
};
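
/*
 * A minimal sketch (not part of the driver) of the legacy synchronous
 * blkcipher interface that this v4.17 code registers.  The helper name
 * geode_test_cbc_blk() is an illustrative assumption; the blkcipher API
 * shown here existed in v4.17 but has since been removed from the kernel.
 */
#if 0
static int geode_test_cbc_blk(void)
{
        u8 key[AES_KEYSIZE_128] = { 0 };
        u8 iv[AES_BLOCK_SIZE] = { 0 };
        u8 buf[AES_BLOCK_SIZE] = { 0 };
        struct crypto_blkcipher *tfm;
        struct blkcipher_desc desc;
        struct scatterlist sg;
        int ret;

        tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = crypto_blkcipher_setkey(tfm, key, sizeof(key));
        if (!ret) {
                /* crypto_blkcipher_encrypt() uses the tfm-internal IV. */
                crypto_blkcipher_set_iv(tfm, iv, sizeof(iv));
                sg_init_one(&sg, buf, sizeof(buf));
                desc.tfm = tfm;
                desc.flags = 0;
                ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, sizeof(buf));
        }

        crypto_free_blkcipher(tfm);
        return ret;
}
#endif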

static int
geode_ecb_decrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_dec(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr,
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_ECB;
                op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
                op->dir = AES_DIR_DECRYPT;

                ret = geode_aes_crypt(op);
                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static int
geode_ecb_encrypt(struct blkcipher_desc *desc,
                  struct scatterlist *dst, struct scatterlist *src,
                  unsigned int nbytes)
{
        struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
        struct blkcipher_walk walk;
        int err, ret;

        if (unlikely(op->keylen != AES_KEYSIZE_128))
                return fallback_blk_enc(desc, dst, src, nbytes);

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                op->src = walk.src.virt.addr,
                op->dst = walk.dst.virt.addr;
                op->mode = AES_MODE_ECB;
                op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
                op->dir = AES_DIR_ENCRYPT;

                ret = geode_aes_crypt(op);
                nbytes -= ret;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}

static struct crypto_alg geode_ecb_alg = {
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-geode",
        .cra_priority           = 400,
        .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_NEED_FALLBACK,
        .cra_init               = fallback_init_blk,
        .cra_exit               = fallback_exit_blk,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct geode_aes_op),
        .cra_alignmask          = 15,
        .cra_type               = &crypto_blkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_u                  = {
                .blkcipher = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = geode_setkey_blk,
                        .encrypt        = geode_ecb_encrypt,
                        .decrypt        = geode_ecb_decrypt,
                }
        }
};

static void geode_aes_remove(struct pci_dev *dev)
{
        crypto_unregister_alg(&geode_alg);
        crypto_unregister_alg(&geode_ecb_alg);
        crypto_unregister_alg(&geode_cbc_alg);

        pci_iounmap(dev, _iobase);
        _iobase = NULL;

        pci_release_regions(dev);
        pci_disable_device(dev);
}


static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        int ret;

        ret = pci_enable_device(dev);
        if (ret)
                return ret;

        ret = pci_request_regions(dev, "geode-aes");
        if (ret)
                goto eenable;

        _iobase = pci_iomap(dev, 0, 0);

        if (_iobase == NULL) {
                ret = -ENOMEM;
                goto erequest;
        }

        spin_lock_init(&lock);

        /* Clear any pending activity */
        iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

        ret = crypto_register_alg(&geode_alg);
        if (ret)
                goto eiomap;

        ret = crypto_register_alg(&geode_ecb_alg);
        if (ret)
                goto ealg;

        ret = crypto_register_alg(&geode_cbc_alg);
        if (ret)
                goto eecb;

        dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
        return 0;

 eecb:
        crypto_unregister_alg(&geode_ecb_alg);

 ealg:
        crypto_unregister_alg(&geode_alg);

 eiomap:
        pci_iounmap(dev, _iobase);

 erequest:
        pci_release_regions(dev);

 eenable:
        pci_disable_device(dev);

        dev_err(&dev->dev, "GEODE AES initialization failed.\n");
        return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
        .name = "Geode LX AES",
        .id_table = geode_aes_tbl,
        .probe = geode_aes_probe,
        .remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");