v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Intel IXP4xx NPE-C crypto driver
   4 *
   5 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
   6 */
   7
   8#include <linux/platform_device.h>
   9#include <linux/dma-mapping.h>
  10#include <linux/dmapool.h>
  11#include <linux/crypto.h>
  12#include <linux/kernel.h>
  13#include <linux/rtnetlink.h>
  14#include <linux/interrupt.h>
  15#include <linux/spinlock.h>
  16#include <linux/gfp.h>
  17#include <linux/module.h>
  18
  19#include <crypto/ctr.h>
  20#include <crypto/internal/des.h>
  21#include <crypto/aes.h>
  22#include <crypto/hmac.h>
  23#include <crypto/sha.h>
  24#include <crypto/algapi.h>
  25#include <crypto/internal/aead.h>
  26#include <crypto/internal/skcipher.h>
  27#include <crypto/authenc.h>
  28#include <crypto/scatterwalk.h>
  29
  30#include <linux/soc/ixp4xx/npe.h>
  31#include <linux/soc/ixp4xx/qmgr.h>
  32
  33#define MAX_KEYLEN 32
  34
  35/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
  36#define NPE_CTX_LEN 80
  37#define AES_BLOCK128 16
  38
  39#define NPE_OP_HASH_VERIFY   0x01
  40#define NPE_OP_CCM_ENABLE    0x04
  41#define NPE_OP_CRYPT_ENABLE  0x08
  42#define NPE_OP_HASH_ENABLE   0x10
  43#define NPE_OP_NOT_IN_PLACE  0x20
  44#define NPE_OP_HMAC_DISABLE  0x40
  45#define NPE_OP_CRYPT_ENCRYPT 0x80
  46
  47#define NPE_OP_CCM_GEN_MIC   0xcc
  48#define NPE_OP_HASH_GEN_ICV  0x50
  49#define NPE_OP_ENC_GEN_KEY   0xc9
  50
  51#define MOD_ECB     0x0000
  52#define MOD_CTR     0x1000
  53#define MOD_CBC_ENC 0x2000
  54#define MOD_CBC_DEC 0x3000
  55#define MOD_CCM_ENC 0x4000
  56#define MOD_CCM_DEC 0x5000
  57
  58#define KEYLEN_128  4
  59#define KEYLEN_192  6
  60#define KEYLEN_256  8
  61
  62#define CIPH_DECR   0x0000
  63#define CIPH_ENCR   0x0400
  64
  65#define MOD_DES     0x0000
  66#define MOD_TDEA2   0x0100
   67#define MOD_3DES    0x0200
  68#define MOD_AES     0x0800
  69#define MOD_AES128  (0x0800 | KEYLEN_128)
  70#define MOD_AES192  (0x0900 | KEYLEN_192)
  71#define MOD_AES256  (0x0a00 | KEYLEN_256)
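     /*
      * The MOD_AES{128,192,256} values pack the key size into the mode
      * word twice: the low nibble carries the key length in 32-bit
      * words (KEYLEN_*), and the high byte steps from 0x08 to 0x0a.
      * setup_cipher() below ORs one of these into the cipher config
      * word written into the NPE context.
      */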
  72
  73#define MAX_IVLEN   16
  74#define NPE_ID      2  /* NPE C */
  75#define NPE_QLEN    16
  76/* Space for registering when the first
  77 * NPE_QLEN crypt_ctl are busy */
  78#define NPE_QLEN_TOTAL 64
  79
  80#define SEND_QID    29
  81#define RECV_QID    30
  82
  83#define CTL_FLAG_UNUSED		0x0000
  84#define CTL_FLAG_USED		0x1000
  85#define CTL_FLAG_PERFORM_ABLK	0x0001
  86#define CTL_FLAG_GEN_ICV	0x0002
  87#define CTL_FLAG_GEN_REVAES	0x0004
  88#define CTL_FLAG_PERFORM_AEAD	0x0008
  89#define CTL_FLAG_MASK		0x000f
  90
  91#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
  92
  93#define MD5_DIGEST_SIZE   16
  94
  95struct buffer_desc {
  96	u32 phys_next;
  97#ifdef __ARMEB__
  98	u16 buf_len;
  99	u16 pkt_len;
 100#else
 101	u16 pkt_len;
 102	u16 buf_len;
 103#endif
 104	dma_addr_t phys_addr;
 105	u32 __reserved[4];
 106	struct buffer_desc *next;
 107	enum dma_data_direction dir;
 108};
 109
 110struct crypt_ctl {
 111#ifdef __ARMEB__
 112	u8 mode;		/* NPE_OP_*  operation mode */
 113	u8 init_len;
 114	u16 reserved;
 115#else
 116	u16 reserved;
 117	u8 init_len;
 118	u8 mode;		/* NPE_OP_*  operation mode */
 119#endif
 120	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
 121	dma_addr_t icv_rev_aes;	/* icv or rev aes */
 122	dma_addr_t src_buf;
 123	dma_addr_t dst_buf;
 124#ifdef __ARMEB__
 125	u16 auth_offs;		/* Authentication start offset */
 126	u16 auth_len;		/* Authentication data length */
 127	u16 crypt_offs;		/* Cryption start offset */
 128	u16 crypt_len;		/* Cryption data length */
 129#else
 130	u16 auth_len;		/* Authentication data length */
 131	u16 auth_offs;		/* Authentication start offset */
 132	u16 crypt_len;		/* Cryption data length */
 133	u16 crypt_offs;		/* Cryption start offset */
 134#endif
 135	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
 136	u32 crypto_ctx;		/* NPE Crypto Param structure address */
 137
 138	/* Used by Host: 4*4 bytes*/
 139	unsigned ctl_flags;
 140	union {
 141		struct skcipher_request *ablk_req;
 142		struct aead_request *aead_req;
 143		struct crypto_tfm *tfm;
 144	} data;
 145	struct buffer_desc *regist_buf;
 146	u8 *regist_ptr;
 147};
 148
 149struct ablk_ctx {
 150	struct buffer_desc *src;
 151	struct buffer_desc *dst;
 152};
 153
 154struct aead_ctx {
 155	struct buffer_desc *src;
 156	struct buffer_desc *dst;
 157	struct scatterlist ivlist;
 158	/* used when the hmac is not on one sg entry */
 159	u8 *hmac_virt;
 160	int encrypt;
 161};
 162
 163struct ix_hash_algo {
 164	u32 cfgword;
 165	unsigned char *icv;
 166};
 167
 168struct ix_sa_dir {
 169	unsigned char *npe_ctx;
 170	dma_addr_t npe_ctx_phys;
 171	int npe_ctx_idx;
 172	u8 npe_mode;
 173};
 174
 175struct ixp_ctx {
 176	struct ix_sa_dir encrypt;
 177	struct ix_sa_dir decrypt;
 178	int authkey_len;
 179	u8 authkey[MAX_KEYLEN];
 180	int enckey_len;
 181	u8 enckey[MAX_KEYLEN];
 182	u8 salt[MAX_IVLEN];
 183	u8 nonce[CTR_RFC3686_NONCE_SIZE];
 184	unsigned salted;
 185	atomic_t configuring;
 186	struct completion completion;
 187};
 188
 189struct ixp_alg {
 190	struct skcipher_alg crypto;
 191	const struct ix_hash_algo *hash;
 192	u32 cfg_enc;
 193	u32 cfg_dec;
 194
 195	int registered;
 196};
 197
 198struct ixp_aead_alg {
 199	struct aead_alg crypto;
 200	const struct ix_hash_algo *hash;
 201	u32 cfg_enc;
 202	u32 cfg_dec;
 203
 204	int registered;
 205};
 206
 207static const struct ix_hash_algo hash_alg_md5 = {
 208	.cfgword	= 0xAA010004,
 209	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
 210			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
 211};
 212static const struct ix_hash_algo hash_alg_sha1 = {
 213	.cfgword	= 0x00000005,
 214	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
 215			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
 216};
 217
 218static struct npe *npe_c;
 219static struct dma_pool *buffer_pool = NULL;
 220static struct dma_pool *ctx_pool = NULL;
 221
 222static struct crypt_ctl *crypt_virt = NULL;
 223static dma_addr_t crypt_phys;
 224
 225static int support_aes = 1;
 226
 227#define DRIVER_NAME "ixp4xx_crypto"
 228
 229static struct platform_device *pdev;
 230
 231static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
 232{
 233	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
 234}
 235
 236static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
 237{
 238	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
 239}
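     /*
      * All crypt_ctl descriptors live in one coherent DMA allocation
      * (see setup_crypt_desc() below), so translating between virtual
      * and bus addresses is plain pointer arithmetic over the fixed
      * 64-byte descriptor size that setup_crypt_desc() asserts with
      * BUILD_BUG_ON().
      */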
 240
 241static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
 242{
  243	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
 244}
 245
 246static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
 247{
  248	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
 249}
 250
 251static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
 252{
 253	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
 254}
 255
 256static int setup_crypt_desc(void)
 257{
 258	struct device *dev = &pdev->dev;
 259	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
 260	crypt_virt = dma_alloc_coherent(dev,
  261					NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
 262					&crypt_phys, GFP_ATOMIC);
 263	if (!crypt_virt)
 264		return -ENOMEM;
 265	return 0;
 266}
 267
 268static spinlock_t desc_lock;
 269static struct crypt_ctl *get_crypt_desc(void)
 270{
 271	int i;
 272	static int idx = 0;
 273	unsigned long flags;
 274
 275	spin_lock_irqsave(&desc_lock, flags);
 276
 277	if (unlikely(!crypt_virt))
 278		setup_crypt_desc();
 279	if (unlikely(!crypt_virt)) {
 280		spin_unlock_irqrestore(&desc_lock, flags);
 281		return NULL;
 282	}
 283	i = idx;
 284	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
 285		if (++idx >= NPE_QLEN)
 286			idx = 0;
 287		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
 288		spin_unlock_irqrestore(&desc_lock, flags);
  289		return crypt_virt + i;
 290	} else {
 291		spin_unlock_irqrestore(&desc_lock, flags);
 292		return NULL;
 293	}
 294}
 295
 296static spinlock_t emerg_lock;
 297static struct crypt_ctl *get_crypt_desc_emerg(void)
 298{
 299	int i;
 300	static int idx = NPE_QLEN;
 301	struct crypt_ctl *desc;
 302	unsigned long flags;
 303
 304	desc = get_crypt_desc();
 305	if (desc)
 306		return desc;
 307	if (unlikely(!crypt_virt))
 308		return NULL;
 309
 310	spin_lock_irqsave(&emerg_lock, flags);
 311	i = idx;
 312	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
 313		if (++idx >= NPE_QLEN_TOTAL)
 314			idx = NPE_QLEN;
 315		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
 316		spin_unlock_irqrestore(&emerg_lock, flags);
  317		return crypt_virt + i;
 318	} else {
 319		spin_unlock_irqrestore(&emerg_lock, flags);
 320		return NULL;
 321	}
 322}
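     /*
      * The descriptor pool is split in two: indices 0..NPE_QLEN-1 back
      * normal crypt requests, while NPE_QLEN..NPE_QLEN_TOTAL-1 are
      * reserved for control operations (HMAC pad registration, reverse
      * AES key generation), so a setkey can still make progress while
      * the data-path ring is fully busy. This is why the pool is
      * allocated and freed with NPE_QLEN_TOTAL entries.
      */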
 323
 324static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
 325			   dma_addr_t phys)
 326{
 327	while (buf) {
 328		struct buffer_desc *buf1;
 329		u32 phys1;
 330
 331		buf1 = buf->next;
 332		phys1 = buf->phys_next;
 333		dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
 334		dma_pool_free(buffer_pool, buf, phys);
 335		buf = buf1;
 336		phys = phys1;
 337	}
 338}
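     /*
      * Each buffer_desc stores both the virtual and the bus address of
      * its successor (next/phys_next), so the chain can be walked and
      * returned to the DMA pool; both links are read before the node
      * itself is freed.
      */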
 339
 340static struct tasklet_struct crypto_done_tasklet;
 341
 342static void finish_scattered_hmac(struct crypt_ctl *crypt)
 343{
 344	struct aead_request *req = crypt->data.aead_req;
 345	struct aead_ctx *req_ctx = aead_request_ctx(req);
 346	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 347	int authsize = crypto_aead_authsize(tfm);
 348	int decryptlen = req->assoclen + req->cryptlen - authsize;
 349
 350	if (req_ctx->encrypt) {
 351		scatterwalk_map_and_copy(req_ctx->hmac_virt,
 352			req->dst, decryptlen, authsize, 1);
 353	}
 354	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
 355}
 356
 357static void one_packet(dma_addr_t phys)
 358{
 359	struct device *dev = &pdev->dev;
 360	struct crypt_ctl *crypt;
 361	struct ixp_ctx *ctx;
 362	int failed;
 363
 364	failed = phys & 0x1 ? -EBADMSG : 0;
 365	phys &= ~0x3;
 366	crypt = crypt_phys2virt(phys);
 367
 368	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
 369	case CTL_FLAG_PERFORM_AEAD: {
 370		struct aead_request *req = crypt->data.aead_req;
 371		struct aead_ctx *req_ctx = aead_request_ctx(req);
 372
 373		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 374		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 375		if (req_ctx->hmac_virt) {
 376			finish_scattered_hmac(crypt);
 377		}
 378		req->base.complete(&req->base, failed);
 379		break;
 380	}
 381	case CTL_FLAG_PERFORM_ABLK: {
 382		struct skcipher_request *req = crypt->data.ablk_req;
 383		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
 384
 385		if (req_ctx->dst) {
 386			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 387		}
 388		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 389		req->base.complete(&req->base, failed);
 390		break;
 391	}
 392	case CTL_FLAG_GEN_ICV:
 393		ctx = crypto_tfm_ctx(crypt->data.tfm);
 394		dma_pool_free(ctx_pool, crypt->regist_ptr,
 395				crypt->regist_buf->phys_addr);
 396		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
 397		if (atomic_dec_and_test(&ctx->configuring))
 398			complete(&ctx->completion);
 399		break;
 400	case CTL_FLAG_GEN_REVAES:
 401		ctx = crypto_tfm_ctx(crypt->data.tfm);
 402		*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
 403		if (atomic_dec_and_test(&ctx->configuring))
 404			complete(&ctx->completion);
 405		break;
 406	default:
 407		BUG();
 408	}
 409	crypt->ctl_flags = CTL_FLAG_UNUSED;
 410}
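     /*
      * The NPE apparently passes status back in the low bits of the
      * descriptor's bus address: bit 0 set signals a failed (H)MAC
      * verification, mapped to -EBADMSG. The low bits are masked off
      * before the address is translated back to a descriptor pointer.
      */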
 411
 412static void irqhandler(void *_unused)
 413{
 414	tasklet_schedule(&crypto_done_tasklet);
 415}
 416
 417static void crypto_done_action(unsigned long arg)
 418{
 419	int i;
 420
  421	for (i = 0; i < 4; i++) {
 422		dma_addr_t phys = qmgr_get_entry(RECV_QID);
 423		if (!phys)
 424			return;
 425		one_packet(phys);
 426	}
 427	tasklet_schedule(&crypto_done_tasklet);
 428}
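     /*
      * Only four completions are handled per run before the tasklet
      * reschedules itself, bounding the time spent in a single softirq
      * invocation; a run that finds the queue empty simply returns.
      */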
 429
 430static int init_ixp_crypto(struct device *dev)
 431{
 432	int ret = -ENODEV;
 433	u32 msg[2] = { 0, 0 };
 434
  435	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
 436				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
 437		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
 438		return ret;
 439	}
 440	npe_c = npe_request(NPE_ID);
 441	if (!npe_c)
 442		return ret;
 443
 444	if (!npe_running(npe_c)) {
 445		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
 446		if (ret)
 447			goto npe_release;
 448		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
 449			goto npe_error;
 450	} else {
 451		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
 452			goto npe_error;
 453
 454		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
 455			goto npe_error;
 456	}
 457
 458	switch ((msg[1]>>16) & 0xff) {
 459	case 3:
 460		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
 461				npe_name(npe_c));
 462		support_aes = 0;
 463		break;
 464	case 4:
 465	case 5:
 466		support_aes = 1;
 467		break;
 468	default:
 469		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
 470			npe_name(npe_c));
 471		ret = -ENODEV;
 472		goto npe_release;
 473	}
 474	/* buffer_pool will also be used to sometimes store the hmac,
 475	 * so assure it is large enough
 476	 */
 477	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
 478	buffer_pool = dma_pool_create("buffer", dev,
 479			sizeof(struct buffer_desc), 32, 0);
 480	ret = -ENOMEM;
 481	if (!buffer_pool) {
 482		goto err;
 483	}
 484	ctx_pool = dma_pool_create("context", dev,
 485			NPE_CTX_LEN, 16, 0);
 486	if (!ctx_pool) {
 487		goto err;
 488	}
 489	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
 490				 "ixp_crypto:out", NULL);
 491	if (ret)
 492		goto err;
 493	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
 494				 "ixp_crypto:in", NULL);
 495	if (ret) {
 496		qmgr_release_queue(SEND_QID);
 497		goto err;
 498	}
 499	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
 500	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
 501
 502	qmgr_enable_irq(RECV_QID);
 503	return 0;
 504
 505npe_error:
 506	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
 507	ret = -EIO;
 508err:
 509	dma_pool_destroy(ctx_pool);
 510	dma_pool_destroy(buffer_pool);
 511npe_release:
 512	npe_release(npe_c);
 513	return ret;
 514}
 515
 516static void release_ixp_crypto(struct device *dev)
 517{
 518	qmgr_disable_irq(RECV_QID);
 519	tasklet_kill(&crypto_done_tasklet);
 520
 521	qmgr_release_queue(SEND_QID);
 522	qmgr_release_queue(RECV_QID);
 523
 524	dma_pool_destroy(ctx_pool);
 525	dma_pool_destroy(buffer_pool);
 526
 527	npe_release(npe_c);
 528
 529	if (crypt_virt) {
 530		dma_free_coherent(dev,
  531			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
 532			crypt_virt, crypt_phys);
 533	}
 534}
 535
 536static void reset_sa_dir(struct ix_sa_dir *dir)
 537{
 538	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
 539	dir->npe_ctx_idx = 0;
 540	dir->npe_mode = 0;
 541}
 542
 543static int init_sa_dir(struct ix_sa_dir *dir)
 544{
 545	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
 546	if (!dir->npe_ctx) {
 547		return -ENOMEM;
 548	}
 549	reset_sa_dir(dir);
 550	return 0;
 551}
 552
 553static void free_sa_dir(struct ix_sa_dir *dir)
 554{
 555	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
 556	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
 557}
 558
 559static int init_tfm(struct crypto_tfm *tfm)
 560{
 561	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 562	int ret;
 563
 564	atomic_set(&ctx->configuring, 0);
 565	ret = init_sa_dir(&ctx->encrypt);
 566	if (ret)
 567		return ret;
 568	ret = init_sa_dir(&ctx->decrypt);
 569	if (ret) {
 570		free_sa_dir(&ctx->encrypt);
 571	}
 572	return ret;
 573}
 574
 575static int init_tfm_ablk(struct crypto_skcipher *tfm)
 576{
 577	crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
 578	return init_tfm(crypto_skcipher_tfm(tfm));
 579}
 580
 581static int init_tfm_aead(struct crypto_aead *tfm)
 582{
 583	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
 584	return init_tfm(crypto_aead_tfm(tfm));
 585}
 586
 587static void exit_tfm(struct crypto_tfm *tfm)
 588{
 589	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 590	free_sa_dir(&ctx->encrypt);
 591	free_sa_dir(&ctx->decrypt);
 592}
 593
 594static void exit_tfm_ablk(struct crypto_skcipher *tfm)
 595{
 596	exit_tfm(crypto_skcipher_tfm(tfm));
 597}
 598
 599static void exit_tfm_aead(struct crypto_aead *tfm)
 600{
 601	exit_tfm(crypto_aead_tfm(tfm));
 602}
 603
 604static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
 605		int init_len, u32 ctx_addr, const u8 *key, int key_len)
 606{
 607	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 608	struct crypt_ctl *crypt;
 609	struct buffer_desc *buf;
 610	int i;
 611	u8 *pad;
 612	dma_addr_t pad_phys, buf_phys;
 613
 614	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
 615	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
 616	if (!pad)
 617		return -ENOMEM;
 618	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
 619	if (!buf) {
 620		dma_pool_free(ctx_pool, pad, pad_phys);
 621		return -ENOMEM;
 622	}
 623	crypt = get_crypt_desc_emerg();
 624	if (!crypt) {
 625		dma_pool_free(ctx_pool, pad, pad_phys);
 626		dma_pool_free(buffer_pool, buf, buf_phys);
 627		return -EAGAIN;
 628	}
 629
 630	memcpy(pad, key, key_len);
 631	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
 632	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
 633		pad[i] ^= xpad;
 634	}
 635
 636	crypt->data.tfm = tfm;
 637	crypt->regist_ptr = pad;
 638	crypt->regist_buf = buf;
 639
 640	crypt->auth_offs = 0;
 641	crypt->auth_len = HMAC_PAD_BLOCKLEN;
 642	crypt->crypto_ctx = ctx_addr;
 643	crypt->src_buf = buf_phys;
 644	crypt->icv_rev_aes = target;
 645	crypt->mode = NPE_OP_HASH_GEN_ICV;
 646	crypt->init_len = init_len;
 647	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
 648
 649	buf->next = 0;
 650	buf->buf_len = HMAC_PAD_BLOCKLEN;
 651	buf->pkt_len = 0;
 652	buf->phys_addr = pad_phys;
 653
 654	atomic_inc(&ctx->configuring);
 655	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 656	BUG_ON(qmgr_stat_overflow(SEND_QID));
 657	return 0;
 658}
 659
 660static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
 661		const u8 *key, int key_len, unsigned digest_len)
 662{
 663	u32 itarget, otarget, npe_ctx_addr;
 664	unsigned char *cinfo;
 665	int init_len, ret = 0;
 666	u32 cfgword;
 667	struct ix_sa_dir *dir;
 668	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 669	const struct ix_hash_algo *algo;
 670
 671	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
 672	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
 673	algo = ix_hash(tfm);
 674
 675	/* write cfg word to cryptinfo */
  676	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
 677#ifndef __ARMEB__
 678	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
 679#endif
 680	*(u32*)cinfo = cpu_to_be32(cfgword);
 681	cinfo += sizeof(cfgword);
 682
 683	/* write ICV to cryptinfo */
 684	memcpy(cinfo, algo->icv, digest_len);
 685	cinfo += digest_len;
 686
 687	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
 688				+ sizeof(algo->cfgword);
 689	otarget = itarget + digest_len;
 690	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
 691	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
 692
 693	dir->npe_ctx_idx += init_len;
 694	dir->npe_mode |= NPE_OP_HASH_ENABLE;
 695
 696	if (!encrypt)
 697		dir->npe_mode |= NPE_OP_HASH_VERIFY;
 698
 699	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
 700			init_len, npe_ctx_addr, key, key_len);
 701	if (ret)
 702		return ret;
 703	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
 704			init_len, npe_ctx_addr, key, key_len);
 705}
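     /*
      * This is the standard HMAC precomputation: setup_auth() writes
      * the hash config word and initial chaining value into the NPE
      * context, then register_chain_var() has the NPE pre-hash the
      * key XOR opad and key XOR ipad blocks into the outer and inner
      * chain variables. HMAC_IPAD_VALUE/HMAC_OPAD_VALUE come from
      * <crypto/hmac.h>.
      */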
 706
 707static int gen_rev_aes_key(struct crypto_tfm *tfm)
 708{
 709	struct crypt_ctl *crypt;
 710	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 711	struct ix_sa_dir *dir = &ctx->decrypt;
 712
 713	crypt = get_crypt_desc_emerg();
 714	if (!crypt) {
 715		return -EAGAIN;
 716	}
 717	*(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
 718
 719	crypt->data.tfm = tfm;
 720	crypt->crypt_offs = 0;
 721	crypt->crypt_len = AES_BLOCK128;
 722	crypt->src_buf = 0;
 723	crypt->crypto_ctx = dir->npe_ctx_phys;
 724	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
 725	crypt->mode = NPE_OP_ENC_GEN_KEY;
 726	crypt->init_len = dir->npe_ctx_idx;
 727	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
 728
 729	atomic_inc(&ctx->configuring);
 730	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 731	BUG_ON(qmgr_stat_overflow(SEND_QID));
 732	return 0;
 733}
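     /*
      * AES decryption needs the reverse (decryption) key schedule.
      * CIPH_ENCR is set temporarily and an NPE_OP_ENC_GEN_KEY
      * operation stores the reverse key right after the config word
      * (npe_ctx_phys + sizeof(u32)); the CTL_FLAG_GEN_REVAES branch in
      * one_packet() clears the bit again on completion.
      */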
 734
 735static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
 736		const u8 *key, int key_len)
 737{
 738	u8 *cinfo;
 739	u32 cipher_cfg;
 740	u32 keylen_cfg = 0;
 741	struct ix_sa_dir *dir;
 742	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 743	int err;
 744
 745	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
 746	cinfo = dir->npe_ctx;
 747
 748	if (encrypt) {
 749		cipher_cfg = cipher_cfg_enc(tfm);
 750		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
 751	} else {
 752		cipher_cfg = cipher_cfg_dec(tfm);
 753	}
 754	if (cipher_cfg & MOD_AES) {
 755		switch (key_len) {
 756		case 16: keylen_cfg = MOD_AES128; break;
 757		case 24: keylen_cfg = MOD_AES192; break;
 758		case 32: keylen_cfg = MOD_AES256; break;
 759		default:
 760			return -EINVAL;
 761		}
 762		cipher_cfg |= keylen_cfg;
 763	} else {
 764		err = crypto_des_verify_key(tfm, key);
 765		if (err)
 766			return err;
 767	}
 768	/* write cfg word to cryptinfo */
 769	*(u32*)cinfo = cpu_to_be32(cipher_cfg);
 770	cinfo += sizeof(cipher_cfg);
 771
 772	/* write cipher key to cryptinfo */
 773	memcpy(cinfo, key, key_len);
 774	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
 775	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
  776		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
 777		key_len = DES3_EDE_KEY_SIZE;
 778	}
 779	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
 780	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
 781	if ((cipher_cfg & MOD_AES) && !encrypt) {
 782		return gen_rev_aes_key(tfm);
 783	}
 784	return 0;
 785}
 786
 787static struct buffer_desc *chainup_buffers(struct device *dev,
 788		struct scatterlist *sg,	unsigned nbytes,
 789		struct buffer_desc *buf, gfp_t flags,
 790		enum dma_data_direction dir)
 791{
 792	for (; nbytes > 0; sg = sg_next(sg)) {
 793		unsigned len = min(nbytes, sg->length);
 794		struct buffer_desc *next_buf;
 795		dma_addr_t next_buf_phys;
 796		void *ptr;
 797
 798		nbytes -= len;
 799		ptr = sg_virt(sg);
 800		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
 801		if (!next_buf) {
 802			buf = NULL;
 803			break;
 804		}
 805		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
 806		buf->next = next_buf;
 807		buf->phys_next = next_buf_phys;
 808		buf = next_buf;
 809
 810		buf->phys_addr = sg_dma_address(sg);
 811		buf->buf_len = len;
 812		buf->dir = dir;
 813	}
 814	buf->next = NULL;
 815	buf->phys_next = 0;
 816	return buf;
 817}
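     /*
      * The caller passes an on-stack "hook" descriptor as the chain
      * head; only pool-allocated descriptors are linked after it, so
      * the real chain starts at hook.next and the hook itself is never
      * freed. On allocation failure NULL is returned and the partially
      * built chain is released by the caller via free_buf_chain().
      */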
 818
 819static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
 820			unsigned int key_len)
 821{
 822	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
 823	int ret;
 824
 825	init_completion(&ctx->completion);
 826	atomic_inc(&ctx->configuring);
 827
 828	reset_sa_dir(&ctx->encrypt);
 829	reset_sa_dir(&ctx->decrypt);
 830
 831	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
 832	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
 833
 834	ret = setup_cipher(&tfm->base, 0, key, key_len);
 835	if (ret)
 836		goto out;
 837	ret = setup_cipher(&tfm->base, 1, key, key_len);
 838out:
 839	if (!atomic_dec_and_test(&ctx->configuring))
 840		wait_for_completion(&ctx->completion);
 841	return ret;
 842}
 843
 844static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
 845			    unsigned int key_len)
 846{
 847	return verify_skcipher_des3_key(tfm, key) ?:
 848	       ablk_setkey(tfm, key, key_len);
 849}
 850
 851static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
 852		unsigned int key_len)
 853{
 854	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
 855
 856	/* the nonce is stored in bytes at end of key */
 857	if (key_len < CTR_RFC3686_NONCE_SIZE)
 858		return -EINVAL;
 859
 860	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
 861			CTR_RFC3686_NONCE_SIZE);
 862
 863	key_len -= CTR_RFC3686_NONCE_SIZE;
 864	return ablk_setkey(tfm, key, key_len);
 865}
 866
 867static int ablk_perform(struct skcipher_request *req, int encrypt)
 868{
 869	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 870	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
 871	unsigned ivsize = crypto_skcipher_ivsize(tfm);
 872	struct ix_sa_dir *dir;
 873	struct crypt_ctl *crypt;
 874	unsigned int nbytes = req->cryptlen;
 875	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
 876	struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
 877	struct buffer_desc src_hook;
 878	struct device *dev = &pdev->dev;
 879	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 880				GFP_KERNEL : GFP_ATOMIC;
 881
 882	if (qmgr_stat_full(SEND_QID))
 883		return -EAGAIN;
 884	if (atomic_read(&ctx->configuring))
 885		return -EAGAIN;
 886
 887	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
 888
 889	crypt = get_crypt_desc();
 890	if (!crypt)
 891		return -ENOMEM;
 892
 893	crypt->data.ablk_req = req;
 894	crypt->crypto_ctx = dir->npe_ctx_phys;
 895	crypt->mode = dir->npe_mode;
 896	crypt->init_len = dir->npe_ctx_idx;
 897
 898	crypt->crypt_offs = 0;
 899	crypt->crypt_len = nbytes;
 900
 901	BUG_ON(ivsize && !req->iv);
 902	memcpy(crypt->iv, req->iv, ivsize);
 903	if (req->src != req->dst) {
 904		struct buffer_desc dst_hook;
 905		crypt->mode |= NPE_OP_NOT_IN_PLACE;
 906		/* This was never tested by Intel
 907		 * for more than one dst buffer, I think. */
 908		req_ctx->dst = NULL;
 909		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
 910					flags, DMA_FROM_DEVICE))
 911			goto free_buf_dest;
 912		src_direction = DMA_TO_DEVICE;
 913		req_ctx->dst = dst_hook.next;
 914		crypt->dst_buf = dst_hook.phys_next;
 915	} else {
 916		req_ctx->dst = NULL;
 917	}
 918	req_ctx->src = NULL;
 919	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
 920				flags, src_direction))
 921		goto free_buf_src;
 922
 923	req_ctx->src = src_hook.next;
 924	crypt->src_buf = src_hook.phys_next;
 925	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
 926	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 927	BUG_ON(qmgr_stat_overflow(SEND_QID));
 928	return -EINPROGRESS;
 929
 930free_buf_src:
 931	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 932free_buf_dest:
 933	if (req->src != req->dst) {
 934		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 935	}
 936	crypt->ctl_flags = CTL_FLAG_UNUSED;
 937	return -ENOMEM;
 938}
 939
 940static int ablk_encrypt(struct skcipher_request *req)
 941{
 942	return ablk_perform(req, 1);
 943}
 944
 945static int ablk_decrypt(struct skcipher_request *req)
 946{
 947	return ablk_perform(req, 0);
 948}
 949
 950static int ablk_rfc3686_crypt(struct skcipher_request *req)
 951{
 952	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 953	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
 954	u8 iv[CTR_RFC3686_BLOCK_SIZE];
 955	u8 *info = req->iv;
 956	int ret;
 957
 958	/* set up counter block */
  959	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
 960	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
 961
 962	/* initialize counter portion of counter block */
 963	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
 964		cpu_to_be32(1);
 965
 966	req->iv = iv;
 967	ret = ablk_perform(req, 1);
 968	req->iv = info;
 969	return ret;
 970}
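     /*
      * RFC 3686 counter block: 4-byte nonce (taken from the tail of
      * the key in ablk_rfc3686_setkey()) || 8-byte per-request IV ||
      * 32-bit block counter starting at 1. CTR encryption and
      * decryption are the same operation, hence ablk_perform(req, 1)
      * for both directions.
      */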
 971
 972static int aead_perform(struct aead_request *req, int encrypt,
 973		int cryptoffset, int eff_cryptlen, u8 *iv)
 974{
 975	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 976	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
 977	unsigned ivsize = crypto_aead_ivsize(tfm);
 978	unsigned authsize = crypto_aead_authsize(tfm);
 979	struct ix_sa_dir *dir;
 980	struct crypt_ctl *crypt;
 981	unsigned int cryptlen;
 982	struct buffer_desc *buf, src_hook;
 983	struct aead_ctx *req_ctx = aead_request_ctx(req);
 984	struct device *dev = &pdev->dev;
 985	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 986				GFP_KERNEL : GFP_ATOMIC;
 987	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
 988	unsigned int lastlen;
 989
 990	if (qmgr_stat_full(SEND_QID))
 991		return -EAGAIN;
 992	if (atomic_read(&ctx->configuring))
 993		return -EAGAIN;
 994
 995	if (encrypt) {
 996		dir = &ctx->encrypt;
 997		cryptlen = req->cryptlen;
 998	} else {
 999		dir = &ctx->decrypt;
1000		/* req->cryptlen includes the authsize when decrypting */
 1001		cryptlen = req->cryptlen - authsize;
1002		eff_cryptlen -= authsize;
1003	}
1004	crypt = get_crypt_desc();
1005	if (!crypt)
1006		return -ENOMEM;
1007
1008	crypt->data.aead_req = req;
1009	crypt->crypto_ctx = dir->npe_ctx_phys;
1010	crypt->mode = dir->npe_mode;
1011	crypt->init_len = dir->npe_ctx_idx;
1012
1013	crypt->crypt_offs = cryptoffset;
1014	crypt->crypt_len = eff_cryptlen;
1015
1016	crypt->auth_offs = 0;
1017	crypt->auth_len = req->assoclen + cryptlen;
1018	BUG_ON(ivsize && !req->iv);
1019	memcpy(crypt->iv, req->iv, ivsize);
1020
1021	buf = chainup_buffers(dev, req->src, crypt->auth_len,
1022			      &src_hook, flags, src_direction);
1023	req_ctx->src = src_hook.next;
1024	crypt->src_buf = src_hook.phys_next;
1025	if (!buf)
1026		goto free_buf_src;
1027
1028	lastlen = buf->buf_len;
1029	if (lastlen >= authsize)
1030		crypt->icv_rev_aes = buf->phys_addr +
1031				     buf->buf_len - authsize;
1032
1033	req_ctx->dst = NULL;
1034
1035	if (req->src != req->dst) {
1036		struct buffer_desc dst_hook;
1037
1038		crypt->mode |= NPE_OP_NOT_IN_PLACE;
1039		src_direction = DMA_TO_DEVICE;
1040
1041		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
1042				      &dst_hook, flags, DMA_FROM_DEVICE);
1043		req_ctx->dst = dst_hook.next;
1044		crypt->dst_buf = dst_hook.phys_next;
1045
1046		if (!buf)
1047			goto free_buf_dst;
1048
1049		if (encrypt) {
1050			lastlen = buf->buf_len;
1051			if (lastlen >= authsize)
1052				crypt->icv_rev_aes = buf->phys_addr +
1053						     buf->buf_len - authsize;
1054		}
1055	}
1056
1057	if (unlikely(lastlen < authsize)) {
1058		/* The 12 hmac bytes are scattered,
1059		 * we need to copy them into a safe buffer */
1060		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
1061				&crypt->icv_rev_aes);
1062		if (unlikely(!req_ctx->hmac_virt))
1063			goto free_buf_dst;
1064		if (!encrypt) {
1065			scatterwalk_map_and_copy(req_ctx->hmac_virt,
1066				req->src, cryptlen, authsize, 0);
1067		}
1068		req_ctx->encrypt = encrypt;
1069	} else {
1070		req_ctx->hmac_virt = NULL;
1071	}
1072
1073	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1074	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
1075	BUG_ON(qmgr_stat_overflow(SEND_QID));
1076	return -EINPROGRESS;
1077
1078free_buf_dst:
1079	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
1080free_buf_src:
1081	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
1082	crypt->ctl_flags = CTL_FLAG_UNUSED;
1083	return -ENOMEM;
1084}
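     /*
      * When the ICV is not contiguous in the final buffer of the
      * chain, it is bounced through a buffer_pool allocation
      * (hmac_virt): on decrypt the expected ICV is copied out of the
      * source scatterlist up front, on encrypt the computed ICV is
      * copied back into the destination by finish_scattered_hmac().
      */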
1085
1086static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1087{
1088	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1089	unsigned digest_len = crypto_aead_maxauthsize(tfm);
1090	int ret;
1091
1092	if (!ctx->enckey_len && !ctx->authkey_len)
1093		return 0;
1094	init_completion(&ctx->completion);
1095	atomic_inc(&ctx->configuring);
1096
1097	reset_sa_dir(&ctx->encrypt);
1098	reset_sa_dir(&ctx->decrypt);
1099
1100	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1101	if (ret)
1102		goto out;
1103	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1104	if (ret)
1105		goto out;
1106	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1107			ctx->authkey_len, digest_len);
1108	if (ret)
1109		goto out;
1110	ret = setup_auth(&tfm->base, 1, authsize,  ctx->authkey,
1111			ctx->authkey_len, digest_len);
1112out:
1113	if (!atomic_dec_and_test(&ctx->configuring))
1114		wait_for_completion(&ctx->completion);
1115	return ret;
1116}
1117
1118static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1119{
1120	int max = crypto_aead_maxauthsize(tfm) >> 2;
1121
1122	if ((authsize>>2) < 1 || (authsize>>2) > max || (authsize & 3))
1123		return -EINVAL;
1124	return aead_setup(tfm, authsize);
1125}
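     /*
      * authsize must be a nonzero multiple of 4 and no larger than the
      * digest size: setup_auth() encodes it into the hash config word
      * as authsize / 4, i.e. in 32-bit words.
      */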
1126
1127static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1128			unsigned int keylen)
1129{
1130	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1131	struct crypto_authenc_keys keys;
1132
1133	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1134		goto badkey;
1135
1136	if (keys.authkeylen > sizeof(ctx->authkey))
1137		goto badkey;
1138
1139	if (keys.enckeylen > sizeof(ctx->enckey))
1140		goto badkey;
1141
1142	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1143	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1144	ctx->authkey_len = keys.authkeylen;
1145	ctx->enckey_len = keys.enckeylen;
1146
1147	memzero_explicit(&keys, sizeof(keys));
1148	return aead_setup(tfm, crypto_aead_authsize(tfm));
1149badkey:
1150	memzero_explicit(&keys, sizeof(keys));
1151	return -EINVAL;
1152}
1153
1154static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
1155			    unsigned int keylen)
1156{
1157	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1158	struct crypto_authenc_keys keys;
1159	int err;
1160
1161	err = crypto_authenc_extractkeys(&keys, key, keylen);
1162	if (unlikely(err))
1163		goto badkey;
1164
1165	err = -EINVAL;
1166	if (keys.authkeylen > sizeof(ctx->authkey))
1167		goto badkey;
1168
1169	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
1170	if (err)
1171		goto badkey;
1172
1173	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1174	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1175	ctx->authkey_len = keys.authkeylen;
1176	ctx->enckey_len = keys.enckeylen;
1177
1178	memzero_explicit(&keys, sizeof(keys));
1179	return aead_setup(tfm, crypto_aead_authsize(tfm));
1180badkey:
1181	memzero_explicit(&keys, sizeof(keys));
1182	return err;
1183}
1184
1185static int aead_encrypt(struct aead_request *req)
1186{
1187	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
1188}
1189
1190static int aead_decrypt(struct aead_request *req)
1191{
1192	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
1193}
1194
1195static struct ixp_alg ixp4xx_algos[] = {
1196{
1197	.crypto	= {
1198		.base.cra_name		= "cbc(des)",
1199		.base.cra_blocksize	= DES_BLOCK_SIZE,
1200
1201		.min_keysize		= DES_KEY_SIZE,
1202		.max_keysize		= DES_KEY_SIZE,
1203		.ivsize			= DES_BLOCK_SIZE,
1204	},
1205	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1206	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1207
1208}, {
1209	.crypto	= {
1210		.base.cra_name		= "ecb(des)",
1211		.base.cra_blocksize	= DES_BLOCK_SIZE,
1212		.min_keysize		= DES_KEY_SIZE,
1213		.max_keysize		= DES_KEY_SIZE,
1214	},
1215	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1216	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1217}, {
1218	.crypto	= {
1219		.base.cra_name		= "cbc(des3_ede)",
1220		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1221
1222		.min_keysize		= DES3_EDE_KEY_SIZE,
1223		.max_keysize		= DES3_EDE_KEY_SIZE,
1224		.ivsize			= DES3_EDE_BLOCK_SIZE,
1225		.setkey			= ablk_des3_setkey,
1226	},
1227	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1228	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1229}, {
1230	.crypto	= {
1231		.base.cra_name		= "ecb(des3_ede)",
1232		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1233
1234		.min_keysize		= DES3_EDE_KEY_SIZE,
1235		.max_keysize		= DES3_EDE_KEY_SIZE,
1236		.setkey			= ablk_des3_setkey,
1237	},
1238	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1239	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1240}, {
1241	.crypto	= {
1242		.base.cra_name		= "cbc(aes)",
1243		.base.cra_blocksize	= AES_BLOCK_SIZE,
1244
1245		.min_keysize		= AES_MIN_KEY_SIZE,
1246		.max_keysize		= AES_MAX_KEY_SIZE,
1247		.ivsize			= AES_BLOCK_SIZE,
1248	},
1249	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1250	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1251}, {
1252	.crypto	= {
1253		.base.cra_name		= "ecb(aes)",
1254		.base.cra_blocksize	= AES_BLOCK_SIZE,
1255
1256		.min_keysize		= AES_MIN_KEY_SIZE,
1257		.max_keysize		= AES_MAX_KEY_SIZE,
1258	},
1259	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1260	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1261}, {
1262	.crypto	= {
1263		.base.cra_name		= "ctr(aes)",
1264		.base.cra_blocksize	= 1,
1265
1266		.min_keysize		= AES_MIN_KEY_SIZE,
1267		.max_keysize		= AES_MAX_KEY_SIZE,
1268		.ivsize			= AES_BLOCK_SIZE,
1269	},
1270	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1271	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1272}, {
1273	.crypto	= {
1274		.base.cra_name		= "rfc3686(ctr(aes))",
1275		.base.cra_blocksize	= 1,
1276
1277		.min_keysize		= AES_MIN_KEY_SIZE,
1278		.max_keysize		= AES_MAX_KEY_SIZE,
1279		.ivsize			= AES_BLOCK_SIZE,
1280		.setkey			= ablk_rfc3686_setkey,
1281		.encrypt		= ablk_rfc3686_crypt,
1282		.decrypt		= ablk_rfc3686_crypt,
1283	},
1284	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1285	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1286} };
1287
1288static struct ixp_aead_alg ixp4xx_aeads[] = {
1289{
1290	.crypto	= {
1291		.base = {
1292			.cra_name	= "authenc(hmac(md5),cbc(des))",
1293			.cra_blocksize	= DES_BLOCK_SIZE,
1294		},
1295		.ivsize		= DES_BLOCK_SIZE,
1296		.maxauthsize	= MD5_DIGEST_SIZE,
1297	},
1298	.hash = &hash_alg_md5,
1299	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1300	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1301}, {
1302	.crypto	= {
1303		.base = {
1304			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
1305			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1306		},
1307		.ivsize		= DES3_EDE_BLOCK_SIZE,
1308		.maxauthsize	= MD5_DIGEST_SIZE,
1309		.setkey		= des3_aead_setkey,
1310	},
1311	.hash = &hash_alg_md5,
1312	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1313	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1314}, {
1315	.crypto	= {
1316		.base = {
1317			.cra_name	= "authenc(hmac(sha1),cbc(des))",
1318			.cra_blocksize	= DES_BLOCK_SIZE,
1319		},
 1320		.ivsize		= DES_BLOCK_SIZE,
 1321		.maxauthsize	= SHA1_DIGEST_SIZE,
1322	},
1323	.hash = &hash_alg_sha1,
1324	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1325	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1326}, {
1327	.crypto	= {
1328		.base = {
1329			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
1330			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1331		},
1332		.ivsize		= DES3_EDE_BLOCK_SIZE,
1333		.maxauthsize	= SHA1_DIGEST_SIZE,
1334		.setkey		= des3_aead_setkey,
1335	},
1336	.hash = &hash_alg_sha1,
1337	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1338	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1339}, {
1340	.crypto	= {
1341		.base = {
1342			.cra_name	= "authenc(hmac(md5),cbc(aes))",
1343			.cra_blocksize	= AES_BLOCK_SIZE,
1344		},
1345		.ivsize		= AES_BLOCK_SIZE,
1346		.maxauthsize	= MD5_DIGEST_SIZE,
1347	},
1348	.hash = &hash_alg_md5,
1349	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1350	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1351}, {
1352	.crypto	= {
1353		.base = {
1354			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
1355			.cra_blocksize	= AES_BLOCK_SIZE,
1356		},
1357		.ivsize		= AES_BLOCK_SIZE,
1358		.maxauthsize	= SHA1_DIGEST_SIZE,
1359	},
1360	.hash = &hash_alg_sha1,
1361	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1362	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1363} };
1364
1365#define IXP_POSTFIX "-ixp4xx"
1366
1367static const struct platform_device_info ixp_dev_info __initdata = {
1368	.name		= DRIVER_NAME,
1369	.id		= 0,
1370	.dma_mask	= DMA_BIT_MASK(32),
1371};
1372
1373static int __init ixp_module_init(void)
1374{
1375	int num = ARRAY_SIZE(ixp4xx_algos);
1376	int i, err;
1377
1378	pdev = platform_device_register_full(&ixp_dev_info);
1379	if (IS_ERR(pdev))
1380		return PTR_ERR(pdev);
1381
1382	spin_lock_init(&desc_lock);
1383	spin_lock_init(&emerg_lock);
1384
1385	err = init_ixp_crypto(&pdev->dev);
1386	if (err) {
1387		platform_device_unregister(pdev);
1388		return err;
1389	}
 1390	for (i = 0; i < num; i++) {
1391		struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
1392
1393		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1394			"%s"IXP_POSTFIX, cra->base.cra_name) >=
1395			CRYPTO_MAX_ALG_NAME)
1396		{
1397			continue;
1398		}
1399		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
1400			continue;
1401		}
1402
1403		/* block ciphers */
1404		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1405				      CRYPTO_ALG_ASYNC |
1406				      CRYPTO_ALG_ALLOCATES_MEMORY;
1407		if (!cra->setkey)
1408			cra->setkey = ablk_setkey;
1409		if (!cra->encrypt)
1410			cra->encrypt = ablk_encrypt;
1411		if (!cra->decrypt)
1412			cra->decrypt = ablk_decrypt;
1413		cra->init = init_tfm_ablk;
1414		cra->exit = exit_tfm_ablk;
1415
1416		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
1417		cra->base.cra_module = THIS_MODULE;
1418		cra->base.cra_alignmask = 3;
1419		cra->base.cra_priority = 300;
1420		if (crypto_register_skcipher(cra))
1421			printk(KERN_ERR "Failed to register '%s'\n",
1422				cra->base.cra_name);
1423		else
1424			ixp4xx_algos[i].registered = 1;
1425	}
1426
1427	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
1428		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
1429
1430		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
1431			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
1432		    CRYPTO_MAX_ALG_NAME)
1433			continue;
1434		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
1435			continue;
1436
1437		/* authenc */
1438		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1439				      CRYPTO_ALG_ASYNC |
1440				      CRYPTO_ALG_ALLOCATES_MEMORY;
1441		cra->setkey = cra->setkey ?: aead_setkey;
1442		cra->setauthsize = aead_setauthsize;
1443		cra->encrypt = aead_encrypt;
1444		cra->decrypt = aead_decrypt;
1445		cra->init = init_tfm_aead;
1446		cra->exit = exit_tfm_aead;
1447
1448		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
1449		cra->base.cra_module = THIS_MODULE;
1450		cra->base.cra_alignmask = 3;
1451		cra->base.cra_priority = 300;
1452
1453		if (crypto_register_aead(cra))
1454			printk(KERN_ERR "Failed to register '%s'\n",
1455				cra->base.cra_driver_name);
1456		else
1457			ixp4xx_aeads[i].registered = 1;
1458	}
1459	return 0;
1460}
1461
1462static void __exit ixp_module_exit(void)
1463{
1464	int num = ARRAY_SIZE(ixp4xx_algos);
1465	int i;
1466
1467	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
1468		if (ixp4xx_aeads[i].registered)
1469			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
1470	}
1471
 1472	for (i = 0; i < num; i++) {
1473		if (ixp4xx_algos[i].registered)
1474			crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
1475	}
1476	release_ixp_crypto(&pdev->dev);
1477	platform_device_unregister(pdev);
1478}
1479
1480module_init(ixp_module_init);
1481module_exit(ixp_module_exit);
1482
1483MODULE_LICENSE("GPL");
1484MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1485MODULE_DESCRIPTION("IXP4xx hardware crypto");
1486
v3.15
 
   1/*
   2 * Intel IXP4xx NPE-C crypto driver
   3 *
   4 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify it
   7 * under the terms of version 2 of the GNU General Public License
   8 * as published by the Free Software Foundation.
   9 *
  10 */
  11
  12#include <linux/platform_device.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/dmapool.h>
  15#include <linux/crypto.h>
  16#include <linux/kernel.h>
  17#include <linux/rtnetlink.h>
  18#include <linux/interrupt.h>
  19#include <linux/spinlock.h>
  20#include <linux/gfp.h>
  21#include <linux/module.h>
  22
  23#include <crypto/ctr.h>
  24#include <crypto/des.h>
  25#include <crypto/aes.h>
 
  26#include <crypto/sha.h>
  27#include <crypto/algapi.h>
  28#include <crypto/aead.h>
 
  29#include <crypto/authenc.h>
  30#include <crypto/scatterwalk.h>
  31
  32#include <mach/npe.h>
  33#include <mach/qmgr.h>
  34
  35#define MAX_KEYLEN 32
  36
  37/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
  38#define NPE_CTX_LEN 80
  39#define AES_BLOCK128 16
  40
  41#define NPE_OP_HASH_VERIFY   0x01
  42#define NPE_OP_CCM_ENABLE    0x04
  43#define NPE_OP_CRYPT_ENABLE  0x08
  44#define NPE_OP_HASH_ENABLE   0x10
  45#define NPE_OP_NOT_IN_PLACE  0x20
  46#define NPE_OP_HMAC_DISABLE  0x40
  47#define NPE_OP_CRYPT_ENCRYPT 0x80
  48
  49#define NPE_OP_CCM_GEN_MIC   0xcc
  50#define NPE_OP_HASH_GEN_ICV  0x50
  51#define NPE_OP_ENC_GEN_KEY   0xc9
  52
  53#define MOD_ECB     0x0000
  54#define MOD_CTR     0x1000
  55#define MOD_CBC_ENC 0x2000
  56#define MOD_CBC_DEC 0x3000
  57#define MOD_CCM_ENC 0x4000
  58#define MOD_CCM_DEC 0x5000
  59
  60#define KEYLEN_128  4
  61#define KEYLEN_192  6
  62#define KEYLEN_256  8
  63
  64#define CIPH_DECR   0x0000
  65#define CIPH_ENCR   0x0400
  66
  67#define MOD_DES     0x0000
  68#define MOD_TDEA2   0x0100
  69#define MOD_3DES   0x0200
  70#define MOD_AES     0x0800
  71#define MOD_AES128  (0x0800 | KEYLEN_128)
  72#define MOD_AES192  (0x0900 | KEYLEN_192)
  73#define MOD_AES256  (0x0a00 | KEYLEN_256)
  74
  75#define MAX_IVLEN   16
  76#define NPE_ID      2  /* NPE C */
  77#define NPE_QLEN    16
  78/* Space for registering when the first
  79 * NPE_QLEN crypt_ctl are busy */
  80#define NPE_QLEN_TOTAL 64
  81
  82#define SEND_QID    29
  83#define RECV_QID    30
  84
  85#define CTL_FLAG_UNUSED		0x0000
  86#define CTL_FLAG_USED		0x1000
  87#define CTL_FLAG_PERFORM_ABLK	0x0001
  88#define CTL_FLAG_GEN_ICV	0x0002
  89#define CTL_FLAG_GEN_REVAES	0x0004
  90#define CTL_FLAG_PERFORM_AEAD	0x0008
  91#define CTL_FLAG_MASK		0x000f
  92
  93#define HMAC_IPAD_VALUE   0x36
  94#define HMAC_OPAD_VALUE   0x5C
  95#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
  96
  97#define MD5_DIGEST_SIZE   16
  98
  99struct buffer_desc {
 100	u32 phys_next;
 101#ifdef __ARMEB__
 102	u16 buf_len;
 103	u16 pkt_len;
 104#else
 105	u16 pkt_len;
 106	u16 buf_len;
 107#endif
 108	u32 phys_addr;
 109	u32 __reserved[4];
 110	struct buffer_desc *next;
 111	enum dma_data_direction dir;
 112};
 113
 114struct crypt_ctl {
 115#ifdef __ARMEB__
 116	u8 mode;		/* NPE_OP_*  operation mode */
 117	u8 init_len;
 118	u16 reserved;
 119#else
 120	u16 reserved;
 121	u8 init_len;
 122	u8 mode;		/* NPE_OP_*  operation mode */
 123#endif
 124	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
 125	u32 icv_rev_aes;	/* icv or rev aes */
 126	u32 src_buf;
 127	u32 dst_buf;
 128#ifdef __ARMEB__
 129	u16 auth_offs;		/* Authentication start offset */
 130	u16 auth_len;		/* Authentication data length */
 131	u16 crypt_offs;		/* Cryption start offset */
 132	u16 crypt_len;		/* Cryption data length */
 133#else
 134	u16 auth_len;		/* Authentication data length */
 135	u16 auth_offs;		/* Authentication start offset */
 136	u16 crypt_len;		/* Cryption data length */
 137	u16 crypt_offs;		/* Cryption start offset */
 138#endif
 139	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
 140	u32 crypto_ctx;		/* NPE Crypto Param structure address */
 141
 142	/* Used by Host: 4*4 bytes*/
 143	unsigned ctl_flags;
 144	union {
 145		struct ablkcipher_request *ablk_req;
 146		struct aead_request *aead_req;
 147		struct crypto_tfm *tfm;
 148	} data;
 149	struct buffer_desc *regist_buf;
 150	u8 *regist_ptr;
 151};
 152
 153struct ablk_ctx {
 154	struct buffer_desc *src;
 155	struct buffer_desc *dst;
 156};
 157
 158struct aead_ctx {
 159	struct buffer_desc *buffer;
 
 160	struct scatterlist ivlist;
 161	/* used when the hmac is not on one sg entry */
 162	u8 *hmac_virt;
 163	int encrypt;
 164};
 165
 166struct ix_hash_algo {
 167	u32 cfgword;
 168	unsigned char *icv;
 169};
 170
 171struct ix_sa_dir {
 172	unsigned char *npe_ctx;
 173	dma_addr_t npe_ctx_phys;
 174	int npe_ctx_idx;
 175	u8 npe_mode;
 176};
 177
 178struct ixp_ctx {
 179	struct ix_sa_dir encrypt;
 180	struct ix_sa_dir decrypt;
 181	int authkey_len;
 182	u8 authkey[MAX_KEYLEN];
 183	int enckey_len;
 184	u8 enckey[MAX_KEYLEN];
 185	u8 salt[MAX_IVLEN];
 186	u8 nonce[CTR_RFC3686_NONCE_SIZE];
 187	unsigned salted;
 188	atomic_t configuring;
 189	struct completion completion;
 190};
 191
 192struct ixp_alg {
 193	struct crypto_alg crypto;
 
 
 
 
 
 
 
 
 
 194	const struct ix_hash_algo *hash;
 195	u32 cfg_enc;
 196	u32 cfg_dec;
 197
 198	int registered;
 199};
 200
 201static const struct ix_hash_algo hash_alg_md5 = {
 202	.cfgword	= 0xAA010004,
 203	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
 204			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
 205};
 206static const struct ix_hash_algo hash_alg_sha1 = {
 207	.cfgword	= 0x00000005,
 208	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
 209			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
 210};
 211
 212static struct npe *npe_c;
 213static struct dma_pool *buffer_pool = NULL;
 214static struct dma_pool *ctx_pool = NULL;
 215
 216static struct crypt_ctl *crypt_virt = NULL;
 217static dma_addr_t crypt_phys;
 218
 219static int support_aes = 1;
 220
 221#define DRIVER_NAME "ixp4xx_crypto"
 222
 223static struct platform_device *pdev;
 224
 225static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
 226{
 227	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
 228}
 229
 230static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
 231{
 232	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
 233}
 234
 235static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
 236{
 237	return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_enc;
 238}
 239
 240static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
 241{
 242	return container_of(tfm->__crt_alg, struct ixp_alg,crypto)->cfg_dec;
 243}
 244
 245static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
 246{
 247	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
 248}
 249
 250static int setup_crypt_desc(void)
 251{
 252	struct device *dev = &pdev->dev;
 253	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
 254	crypt_virt = dma_alloc_coherent(dev,
 255			NPE_QLEN * sizeof(struct crypt_ctl),
 256			&crypt_phys, GFP_ATOMIC);
 257	if (!crypt_virt)
 258		return -ENOMEM;
 259	memset(crypt_virt, 0, NPE_QLEN * sizeof(struct crypt_ctl));
 260	return 0;
 261}
 262
 263static spinlock_t desc_lock;
 264static struct crypt_ctl *get_crypt_desc(void)
 265{
 266	int i;
 267	static int idx = 0;
 268	unsigned long flags;
 269
 270	spin_lock_irqsave(&desc_lock, flags);
 271
 272	if (unlikely(!crypt_virt))
 273		setup_crypt_desc();
 274	if (unlikely(!crypt_virt)) {
 275		spin_unlock_irqrestore(&desc_lock, flags);
 276		return NULL;
 277	}
 278	i = idx;
 279	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
 280		if (++idx >= NPE_QLEN)
 281			idx = 0;
 282		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
 283		spin_unlock_irqrestore(&desc_lock, flags);
 284		return crypt_virt +i;
 285	} else {
 286		spin_unlock_irqrestore(&desc_lock, flags);
 287		return NULL;
 288	}
 289}
 290
 291static spinlock_t emerg_lock;
 292static struct crypt_ctl *get_crypt_desc_emerg(void)
 293{
 294	int i;
 295	static int idx = NPE_QLEN;
 296	struct crypt_ctl *desc;
 297	unsigned long flags;
 298
 299	desc = get_crypt_desc();
 300	if (desc)
 301		return desc;
 302	if (unlikely(!crypt_virt))
 303		return NULL;
 304
 305	spin_lock_irqsave(&emerg_lock, flags);
 306	i = idx;
 307	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
 308		if (++idx >= NPE_QLEN_TOTAL)
 309			idx = NPE_QLEN;
 310		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
 311		spin_unlock_irqrestore(&emerg_lock, flags);
 312		return crypt_virt +i;
 313	} else {
 314		spin_unlock_irqrestore(&emerg_lock, flags);
 315		return NULL;
 316	}
 317}
 318
 319static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
 
 320{
 321	while (buf) {
 322		struct buffer_desc *buf1;
 323		u32 phys1;
 324
 325		buf1 = buf->next;
 326		phys1 = buf->phys_next;
 327		dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
 328		dma_pool_free(buffer_pool, buf, phys);
 329		buf = buf1;
 330		phys = phys1;
 331	}
 332}
 333
 334static struct tasklet_struct crypto_done_tasklet;
 335
 336static void finish_scattered_hmac(struct crypt_ctl *crypt)
 337{
 338	struct aead_request *req = crypt->data.aead_req;
 339	struct aead_ctx *req_ctx = aead_request_ctx(req);
 340	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 341	int authsize = crypto_aead_authsize(tfm);
 342	int decryptlen = req->cryptlen - authsize;
 343
 344	if (req_ctx->encrypt) {
 345		scatterwalk_map_and_copy(req_ctx->hmac_virt,
 346			req->src, decryptlen, authsize, 1);
 347	}
 348	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
 349}
 350
 351static void one_packet(dma_addr_t phys)
 352{
 353	struct device *dev = &pdev->dev;
 354	struct crypt_ctl *crypt;
 355	struct ixp_ctx *ctx;
 356	int failed;
 357
 358	failed = phys & 0x1 ? -EBADMSG : 0;
 359	phys &= ~0x3;
 360	crypt = crypt_phys2virt(phys);
 361
 362	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
 363	case CTL_FLAG_PERFORM_AEAD: {
 364		struct aead_request *req = crypt->data.aead_req;
 365		struct aead_ctx *req_ctx = aead_request_ctx(req);
 366
 367		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
 
 368		if (req_ctx->hmac_virt) {
 369			finish_scattered_hmac(crypt);
 370		}
 371		req->base.complete(&req->base, failed);
 372		break;
 373	}
 374	case CTL_FLAG_PERFORM_ABLK: {
 375		struct ablkcipher_request *req = crypt->data.ablk_req;
 376		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
 377
 378		if (req_ctx->dst) {
 379			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 380		}
 381		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 382		req->base.complete(&req->base, failed);
 383		break;
 384	}
 385	case CTL_FLAG_GEN_ICV:
 386		ctx = crypto_tfm_ctx(crypt->data.tfm);
 387		dma_pool_free(ctx_pool, crypt->regist_ptr,
 388				crypt->regist_buf->phys_addr);
 389		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
 390		if (atomic_dec_and_test(&ctx->configuring))
 391			complete(&ctx->completion);
 392		break;
 393	case CTL_FLAG_GEN_REVAES:
 394		ctx = crypto_tfm_ctx(crypt->data.tfm);
 395		*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
 396		if (atomic_dec_and_test(&ctx->configuring))
 397			complete(&ctx->completion);
 398		break;
 399	default:
 400		BUG();
 401	}
 402	crypt->ctl_flags = CTL_FLAG_UNUSED;
 403}
 404
 405static void irqhandler(void *_unused)
 406{
 407	tasklet_schedule(&crypto_done_tasklet);
 408}
 409
 410static void crypto_done_action(unsigned long arg)
 411{
 412	int i;
 413
 414	for(i=0; i<4; i++) {
 415		dma_addr_t phys = qmgr_get_entry(RECV_QID);
 416		if (!phys)
 417			return;
 418		one_packet(phys);
 419	}
 420	tasklet_schedule(&crypto_done_tasklet);
 421}
 422
 423static int init_ixp_crypto(struct device *dev)
 424{
 425	int ret = -ENODEV;
 426	u32 msg[2] = { 0, 0 };
 427
 428	if (! ( ~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
 429				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
 430		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
 431		return ret;
 432	}
 433	npe_c = npe_request(NPE_ID);
 434	if (!npe_c)
 435		return ret;
 436
 437	if (!npe_running(npe_c)) {
 438		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
 439		if (ret) {
 440			return ret;
 441		}
 442		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
 443			goto npe_error;
 444	} else {
 445		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
 446			goto npe_error;
 447
 448		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
 449			goto npe_error;
 450	}
 451
 452	switch ((msg[1]>>16) & 0xff) {
 453	case 3:
 454		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
 455				npe_name(npe_c));
 456		support_aes = 0;
 457		break;
 458	case 4:
 459	case 5:
 460		support_aes = 1;
 461		break;
 462	default:
 463		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
 464			npe_name(npe_c));
 465		return -ENODEV;
 
 466	}
 467	/* buffer_pool will also be used to sometimes store the hmac,
 468	 * so assure it is large enough
 469	 */
 470	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
 471	buffer_pool = dma_pool_create("buffer", dev,
 472			sizeof(struct buffer_desc), 32, 0);
 473	ret = -ENOMEM;
 474	if (!buffer_pool) {
 475		goto err;
 476	}
 477	ctx_pool = dma_pool_create("context", dev,
 478			NPE_CTX_LEN, 16, 0);
 479	if (!ctx_pool) {
 480		goto err;
 481	}
 482	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
 483				 "ixp_crypto:out", NULL);
 484	if (ret)
 485		goto err;
 486	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
 487				 "ixp_crypto:in", NULL);
 488	if (ret) {
 489		qmgr_release_queue(SEND_QID);
 490		goto err;
 491	}
 492	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
 493	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
 494
 495	qmgr_enable_irq(RECV_QID);
 496	return 0;
 497
 498npe_error:
 499	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
 500	ret = -EIO;
 501err:
 502	if (ctx_pool)
 503		dma_pool_destroy(ctx_pool);
 504	if (buffer_pool)
 505		dma_pool_destroy(buffer_pool);
 506	npe_release(npe_c);
 507	return ret;
 508}
 509
 510static void release_ixp_crypto(struct device *dev)
 511{
 512	qmgr_disable_irq(RECV_QID);
 513	tasklet_kill(&crypto_done_tasklet);
 514
 515	qmgr_release_queue(SEND_QID);
 516	qmgr_release_queue(RECV_QID);
 517
 518	dma_pool_destroy(ctx_pool);
 519	dma_pool_destroy(buffer_pool);
 520
 521	npe_release(npe_c);
 522
  523	if (crypt_virt) {
  524		dma_free_coherent(dev,
  525			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
  526			crypt_virt, crypt_phys);
  527	}
 529}
 530
 531static void reset_sa_dir(struct ix_sa_dir *dir)
 532{
 533	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
 534	dir->npe_ctx_idx = 0;
 535	dir->npe_mode = 0;
 536}
 537
 538static int init_sa_dir(struct ix_sa_dir *dir)
 539{
 540	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
 541	if (!dir->npe_ctx) {
 542		return -ENOMEM;
 543	}
 544	reset_sa_dir(dir);
 545	return 0;
 546}
 547
 548static void free_sa_dir(struct ix_sa_dir *dir)
 549{
 550	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
 551	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
 552}
 553
 554static int init_tfm(struct crypto_tfm *tfm)
 555{
 556	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 557	int ret;
 558
 559	atomic_set(&ctx->configuring, 0);
 560	ret = init_sa_dir(&ctx->encrypt);
 561	if (ret)
 562		return ret;
 563	ret = init_sa_dir(&ctx->decrypt);
 564	if (ret) {
 565		free_sa_dir(&ctx->encrypt);
 566	}
 567	return ret;
 568}
 569
 570static int init_tfm_ablk(struct crypto_tfm *tfm)
 571{
 572	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
 573	return init_tfm(tfm);
 574}
 575
 576static int init_tfm_aead(struct crypto_tfm *tfm)
 577{
 578	tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
 579	return init_tfm(tfm);
 580}
 581
 582static void exit_tfm(struct crypto_tfm *tfm)
 583{
 584	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 585	free_sa_dir(&ctx->encrypt);
 586	free_sa_dir(&ctx->decrypt);
 587}
 588
 589static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
 590		int init_len, u32 ctx_addr, const u8 *key, int key_len)
 591{
 592	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 593	struct crypt_ctl *crypt;
 594	struct buffer_desc *buf;
 595	int i;
 596	u8 *pad;
 597	u32 pad_phys, buf_phys;
 598
 599	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
 600	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
 601	if (!pad)
 602		return -ENOMEM;
 603	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
 604	if (!buf) {
 605		dma_pool_free(ctx_pool, pad, pad_phys);
 606		return -ENOMEM;
 607	}
 608	crypt = get_crypt_desc_emerg();
 609	if (!crypt) {
 610		dma_pool_free(ctx_pool, pad, pad_phys);
 611		dma_pool_free(buffer_pool, buf, buf_phys);
 612		return -EAGAIN;
 613	}
 614
 615	memcpy(pad, key, key_len);
 616	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
 617	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
 618		pad[i] ^= xpad;
 619	}
 620
 621	crypt->data.tfm = tfm;
 622	crypt->regist_ptr = pad;
 623	crypt->regist_buf = buf;
 624
 625	crypt->auth_offs = 0;
 626	crypt->auth_len = HMAC_PAD_BLOCKLEN;
 627	crypt->crypto_ctx = ctx_addr;
 628	crypt->src_buf = buf_phys;
 629	crypt->icv_rev_aes = target;
 630	crypt->mode = NPE_OP_HASH_GEN_ICV;
 631	crypt->init_len = init_len;
 632	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
 633
  634	buf->next = NULL;
 635	buf->buf_len = HMAC_PAD_BLOCKLEN;
 636	buf->pkt_len = 0;
 637	buf->phys_addr = pad_phys;
 638
 639	atomic_inc(&ctx->configuring);
 640	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 641	BUG_ON(qmgr_stat_overflow(SEND_QID));
 642	return 0;
 643}
 644
 645static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
 646		const u8 *key, int key_len, unsigned digest_len)
 647{
 648	u32 itarget, otarget, npe_ctx_addr;
 649	unsigned char *cinfo;
 650	int init_len, ret = 0;
 651	u32 cfgword;
 652	struct ix_sa_dir *dir;
 653	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 654	const struct ix_hash_algo *algo;
 655
 656	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
 657	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
 658	algo = ix_hash(tfm);
 659
 660	/* write cfg word to cryptinfo */
  661	cfgword = algo->cfgword | (authsize << 6); /* (authsize / 4) << 8 */
 662#ifndef __ARMEB__
 663	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
 664#endif
  665	*(u32 *)cinfo = cpu_to_be32(cfgword);
 666	cinfo += sizeof(cfgword);
 667
 668	/* write ICV to cryptinfo */
 669	memcpy(cinfo, algo->icv, digest_len);
 670	cinfo += digest_len;
 671
 672	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
 673				+ sizeof(algo->cfgword);
 674	otarget = itarget + digest_len;
 675	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
 676	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
 677
 678	dir->npe_ctx_idx += init_len;
 679	dir->npe_mode |= NPE_OP_HASH_ENABLE;
 680
 681	if (!encrypt)
 682		dir->npe_mode |= NPE_OP_HASH_VERIFY;
 683
 684	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
 685			init_len, npe_ctx_addr, key, key_len);
 686	if (ret)
 687		return ret;
 688	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
 689			init_len, npe_ctx_addr, key, key_len);
 690}
 691
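/*
 * AES decryption needs the inverse key schedule.  The descriptor built
 * below (mode NPE_OP_ENC_GEN_KEY) makes the NPE run a dummy 16-byte
 * encryption and deposit the reverse key schedule at npe_ctx_phys + 4;
 * completion is signalled asynchronously via CTL_FLAG_GEN_REVAES in
 * one_packet().
 */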
 692static int gen_rev_aes_key(struct crypto_tfm *tfm)
 693{
 694	struct crypt_ctl *crypt;
 695	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 696	struct ix_sa_dir *dir = &ctx->decrypt;
 697
 698	crypt = get_crypt_desc_emerg();
 699	if (!crypt) {
 700		return -EAGAIN;
 701	}
  702	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
 703
 704	crypt->data.tfm = tfm;
 705	crypt->crypt_offs = 0;
 706	crypt->crypt_len = AES_BLOCK128;
 707	crypt->src_buf = 0;
 708	crypt->crypto_ctx = dir->npe_ctx_phys;
 709	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
 710	crypt->mode = NPE_OP_ENC_GEN_KEY;
 711	crypt->init_len = dir->npe_ctx_idx;
 712	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
 713
 714	atomic_inc(&ctx->configuring);
 715	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 716	BUG_ON(qmgr_stat_overflow(SEND_QID));
 717	return 0;
 718}
 719
 720static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
 721		const u8 *key, int key_len)
 722{
 723	u8 *cinfo;
 724	u32 cipher_cfg;
 725	u32 keylen_cfg = 0;
 726	struct ix_sa_dir *dir;
 727	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
 728	u32 *flags = &tfm->crt_flags;
 729
 730	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
 731	cinfo = dir->npe_ctx;
 732
 733	if (encrypt) {
 734		cipher_cfg = cipher_cfg_enc(tfm);
 735		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
 736	} else {
 737		cipher_cfg = cipher_cfg_dec(tfm);
 738	}
 739	if (cipher_cfg & MOD_AES) {
 740		switch (key_len) {
 741		case 16: keylen_cfg = MOD_AES128; break;
 742		case 24: keylen_cfg = MOD_AES192; break;
 743		case 32: keylen_cfg = MOD_AES256; break;
 744		default:
 745			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 746			return -EINVAL;
 747		}
 748		cipher_cfg |= keylen_cfg;
 749	} else if (cipher_cfg & MOD_3DES) {
 750		const u32 *K = (const u32 *)key;
 751		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
 752			     !((K[2] ^ K[4]) | (K[3] ^ K[5]))))
 753		{
 754			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
 755			return -EINVAL;
 756		}
 757	} else {
 758		u32 tmp[DES_EXPKEY_WORDS];
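		/* des_ekey() returns 0 for a weak DES key; only flag it here
		 * and let the setkey caller decide whether weak keys are
		 * acceptable for this request.
		 */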
 759		if (des_ekey(tmp, key) == 0) {
 760			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
 761		}
 762	}
 763	/* write cfg word to cryptinfo */
  764	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
 765	cinfo += sizeof(cipher_cfg);
 766
 767	/* write cipher key to cryptinfo */
 768	memcpy(cinfo, key, key_len);
 769	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
 770	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
  771		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
 772		key_len = DES3_EDE_KEY_SIZE;
 773	}
 774	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
 775	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
 776	if ((cipher_cfg & MOD_AES) && !encrypt) {
 777		return gen_rev_aes_key(tfm);
 778	}
 779	return 0;
 780}
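
/*
 * Resulting per-direction NPE context layout (a sketch; exact offsets
 * depend on the actual key and digest sizes):
 *
 *	npe_ctx + 0		cipher cfg word (big endian)
 *	npe_ctx + 4		cipher key, zero-padded to 24 bytes for DES/3DES
 *	npe_ctx + npe_ctx_idx	hash cfg word and ICV, appended by setup_auth()
 */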
 781
 782static struct buffer_desc *chainup_buffers(struct device *dev,
 783		struct scatterlist *sg,	unsigned nbytes,
 784		struct buffer_desc *buf, gfp_t flags,
 785		enum dma_data_direction dir)
 786{
  787	for (; nbytes > 0; sg = scatterwalk_sg_next(sg)) {
 788		unsigned len = min(nbytes, sg->length);
 789		struct buffer_desc *next_buf;
 790		u32 next_buf_phys;
 791		void *ptr;
 792
 793		nbytes -= len;
 794		ptr = page_address(sg_page(sg)) + sg->offset;
 795		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
  796		if (!next_buf) {
  797			/* terminate the partial chain so the caller can free it */
  798			buf->next = NULL;
			buf->phys_next = 0;
			return NULL;
  799		}
 800		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
 801		buf->next = next_buf;
 802		buf->phys_next = next_buf_phys;
 803		buf = next_buf;
 804
 805		buf->phys_addr = sg_dma_address(sg);
 806		buf->buf_len = len;
 807		buf->dir = dir;
 808	}
 809	buf->next = NULL;
 810	buf->phys_next = 0;
 811	return buf;
 812}
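
/*
 * Typical use (cf. ablk_perform() below): the caller seeds the chain with
 * a stack "hook" descriptor that is never handed to the hardware, then
 * detaches the real head from it:
 *
 *	struct buffer_desc hook;
 *
 *	if (!chainup_buffers(dev, req->src, nbytes, &hook, flags, dir))
 *		goto err;	(a partially built chain must still be freed)
 *	req_ctx->src = hook.next;
 *	crypt->src_buf = hook.phys_next;
 */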
 813
 814static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 815			unsigned int key_len)
 816{
 817	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 818	u32 *flags = &tfm->base.crt_flags;
 819	int ret;
 820
 821	init_completion(&ctx->completion);
 822	atomic_inc(&ctx->configuring);
 823
 824	reset_sa_dir(&ctx->encrypt);
 825	reset_sa_dir(&ctx->decrypt);
 826
 827	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
 828	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
 829
 830	ret = setup_cipher(&tfm->base, 0, key, key_len);
 831	if (ret)
 832		goto out;
 833	ret = setup_cipher(&tfm->base, 1, key, key_len);
 834	if (ret)
 835		goto out;
 836
 837	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
 838		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
 839			ret = -EINVAL;
 840		} else {
 841			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
 842		}
 843	}
 844out:
 845	if (!atomic_dec_and_test(&ctx->configuring))
 846		wait_for_completion(&ctx->completion);
 847	return ret;
 848}
 849
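/*
 * RFC 3686 convention: the last four bytes of the key are the nonce, e.g.
 * a 20-byte key is a 16-byte AES-128 key followed by a 4-byte nonce.
 */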
 850static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 851		unsigned int key_len)
 852{
 853	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 854
  855	/* the nonce is stored at the end of the key */
 856	if (key_len < CTR_RFC3686_NONCE_SIZE)
 857		return -EINVAL;
 858
 859	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
 860			CTR_RFC3686_NONCE_SIZE);
 861
 862	key_len -= CTR_RFC3686_NONCE_SIZE;
 863	return ablk_setkey(tfm, key, key_len);
 864}
 865
 866static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 867{
 868	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 869	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 870	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
 871	struct ix_sa_dir *dir;
 872	struct crypt_ctl *crypt;
 873	unsigned int nbytes = req->nbytes;
 874	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
 875	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
 876	struct buffer_desc src_hook;
 877	struct device *dev = &pdev->dev;
 878	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 879				GFP_KERNEL : GFP_ATOMIC;
 880
 881	if (qmgr_stat_full(SEND_QID))
 882		return -EAGAIN;
 883	if (atomic_read(&ctx->configuring))
 884		return -EAGAIN;
 885
 886	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
 887
 888	crypt = get_crypt_desc();
 889	if (!crypt)
 890		return -ENOMEM;
 891
 892	crypt->data.ablk_req = req;
 893	crypt->crypto_ctx = dir->npe_ctx_phys;
 894	crypt->mode = dir->npe_mode;
 895	crypt->init_len = dir->npe_ctx_idx;
 896
 897	crypt->crypt_offs = 0;
 898	crypt->crypt_len = nbytes;
 899
 900	BUG_ON(ivsize && !req->info);
 901	memcpy(crypt->iv, req->info, ivsize);
 902	if (req->src != req->dst) {
 903		struct buffer_desc dst_hook;
 904		crypt->mode |= NPE_OP_NOT_IN_PLACE;
  905		/* Intel apparently never tested this path with more
  906		 * than one dst buffer. */
 907		BUG_ON(req->dst->length < nbytes);
 908		req_ctx->dst = NULL;
 909		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
 910					flags, DMA_FROM_DEVICE))
 911			goto free_buf_dest;
 912		src_direction = DMA_TO_DEVICE;
 913		req_ctx->dst = dst_hook.next;
 914		crypt->dst_buf = dst_hook.phys_next;
 915	} else {
 916		req_ctx->dst = NULL;
 917	}
 918	req_ctx->src = NULL;
 919	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
 920				flags, src_direction))
 921		goto free_buf_src;
 922
 923	req_ctx->src = src_hook.next;
 924	crypt->src_buf = src_hook.phys_next;
 925	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
 926	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 927	BUG_ON(qmgr_stat_overflow(SEND_QID));
 928	return -EINPROGRESS;
 929
 930free_buf_src:
 931	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 932free_buf_dest:
 933	if (req->src != req->dst) {
 934		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 935	}
 936	crypt->ctl_flags = CTL_FLAG_UNUSED;
 937	return -ENOMEM;
 938}
 939
 940static int ablk_encrypt(struct ablkcipher_request *req)
 941{
 942	return ablk_perform(req, 1);
 943}
 944
 945static int ablk_decrypt(struct ablkcipher_request *req)
 946{
 947	return ablk_perform(req, 0);
 948}
 949
 950static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
 951{
 952	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 953	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 954	u8 iv[CTR_RFC3686_BLOCK_SIZE];
 955	u8 *info = req->info;
 956	int ret;
 957
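	/*
	 * RFC 3686 counter block layout (16 bytes):
	 *	bytes  0-3	nonce, saved at setkey() time
	 *	bytes  4-11	per-request IV
	 *	bytes 12-15	big-endian block counter, initialised to 1
	 */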
 958	/* set up counter block */
  959	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
 960	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
 961
 962	/* initialize counter portion of counter block */
 963	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
 964		cpu_to_be32(1);
 965
 966	req->info = iv;
 967	ret = ablk_perform(req, 1);
 968	req->info = info;
 969	return ret;
 970}
 971
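/*
 * Return nonzero if the nbytes at offset `start' do not fit within a
 * single scatterlist element, i.e. the ICV straddles an sg boundary and
 * must be copied into a linear buffer before use.
 */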
 972static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
 973		unsigned int nbytes)
 974{
 975	int offset = 0;
 976
 977	if (!nbytes)
 978		return 0;
 979
 980	for (;;) {
 981		if (start < offset + sg->length)
 982			break;
 983
 984		offset += sg->length;
 985		sg = scatterwalk_sg_next(sg);
 986	}
 987	return (start + nbytes > offset + sg->length);
 988}
 989
 990static int aead_perform(struct aead_request *req, int encrypt,
 991		int cryptoffset, int eff_cryptlen, u8 *iv)
 992{
 993	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 994	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
 995	unsigned ivsize = crypto_aead_ivsize(tfm);
 996	unsigned authsize = crypto_aead_authsize(tfm);
 997	struct ix_sa_dir *dir;
 998	struct crypt_ctl *crypt;
 999	unsigned int cryptlen;
1000	struct buffer_desc *buf, src_hook;
1001	struct aead_ctx *req_ctx = aead_request_ctx(req);
1002	struct device *dev = &pdev->dev;
1003	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1004				GFP_KERNEL : GFP_ATOMIC;
1005
1006	if (qmgr_stat_full(SEND_QID))
1007		return -EAGAIN;
1008	if (atomic_read(&ctx->configuring))
1009		return -EAGAIN;
1010
1011	if (encrypt) {
1012		dir = &ctx->encrypt;
1013		cryptlen = req->cryptlen;
1014	} else {
1015		dir = &ctx->decrypt;
1016		/* req->cryptlen includes the authsize when decrypting */
 1017		cryptlen = req->cryptlen - authsize;
1018		eff_cryptlen -= authsize;
1019	}
1020	crypt = get_crypt_desc();
1021	if (!crypt)
1022		return -ENOMEM;
1023
1024	crypt->data.aead_req = req;
1025	crypt->crypto_ctx = dir->npe_ctx_phys;
1026	crypt->mode = dir->npe_mode;
1027	crypt->init_len = dir->npe_ctx_idx;
1028
1029	crypt->crypt_offs = cryptoffset;
1030	crypt->crypt_len = eff_cryptlen;
1031
1032	crypt->auth_offs = 0;
1033	crypt->auth_len = req->assoclen + ivsize + cryptlen;
1034	BUG_ON(ivsize && !req->iv);
1035	memcpy(crypt->iv, req->iv, ivsize);
1036
1037	if (req->src != req->dst) {
 1038		BUG(); /* out-of-place AEAD is not implemented (-EOPNOTSUPP) */
1039	}
1040
1041	/* ASSOC data */
1042	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
1043		flags, DMA_TO_DEVICE);
1044	req_ctx->buffer = src_hook.next;
1045	crypt->src_buf = src_hook.phys_next;
1046	if (!buf)
1047		goto out;
1048	/* IV */
1049	sg_init_table(&req_ctx->ivlist, 1);
1050	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
1051	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
1052			DMA_BIDIRECTIONAL);
1053	if (!buf)
1054		goto free_chain;
1055	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
 1056		/* The hmac bytes are scattered across sg elements;
 1057		 * copy them into a contiguous safe buffer */
1058		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
1059				&crypt->icv_rev_aes);
1060		if (unlikely(!req_ctx->hmac_virt))
1061			goto free_chain;
1062		if (!encrypt) {
1063			scatterwalk_map_and_copy(req_ctx->hmac_virt,
1064				req->src, cryptlen, authsize, 0);
1065		}
1066		req_ctx->encrypt = encrypt;
1067	} else {
1068		req_ctx->hmac_virt = NULL;
1069	}
1070	/* Crypt */
1071	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
1072			DMA_BIDIRECTIONAL);
1073	if (!buf)
1074		goto free_hmac_virt;
1075	if (!req_ctx->hmac_virt) {
1076		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
1077	}
1078
1079	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
1080	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
1081	BUG_ON(qmgr_stat_overflow(SEND_QID));
1082	return -EINPROGRESS;
1083free_hmac_virt:
1084	if (req_ctx->hmac_virt) {
1085		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
1086				crypt->icv_rev_aes);
1087	}
1088free_chain:
1089	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
1090out:
1091	crypt->ctl_flags = CTL_FLAG_UNUSED;
1092	return -ENOMEM;
1093}
1094
1095static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
1096{
1097	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1098	u32 *flags = &tfm->base.crt_flags;
1099	unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
1100	int ret;
1101
1102	if (!ctx->enckey_len && !ctx->authkey_len)
1103		return 0;
1104	init_completion(&ctx->completion);
1105	atomic_inc(&ctx->configuring);
1106
1107	reset_sa_dir(&ctx->encrypt);
1108	reset_sa_dir(&ctx->decrypt);
1109
1110	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
1111	if (ret)
1112		goto out;
1113	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
1114	if (ret)
1115		goto out;
1116	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
1117			ctx->authkey_len, digest_len);
1118	if (ret)
1119		goto out;
 1120	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
1121			ctx->authkey_len, digest_len);
1122	if (ret)
1123		goto out;
1124
1125	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
1126		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
1127			ret = -EINVAL;
1128			goto out;
1129		} else {
1130			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
1131		}
1132	}
1133out:
1134	if (!atomic_dec_and_test(&ctx->configuring))
1135		wait_for_completion(&ctx->completion);
1136	return ret;
1137}
1138
1139static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1140{
1141	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;
1142
 1143	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
1144		return -EINVAL;
1145	return aead_setup(tfm, authsize);
1146}
1147
1148static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
1149			unsigned int keylen)
1150{
1151	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1152	struct crypto_authenc_keys keys;
1153
1154	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1155		goto badkey;
1156
1157	if (keys.authkeylen > sizeof(ctx->authkey))
1158		goto badkey;
1159
1160	if (keys.enckeylen > sizeof(ctx->enckey))
1161		goto badkey;
1162
1163	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
1164	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
1165	ctx->authkey_len = keys.authkeylen;
1166	ctx->enckey_len = keys.enckeylen;
1167
1168	return aead_setup(tfm, crypto_aead_authsize(tfm));
1169badkey:
1170	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1171	return -EINVAL;
1172}
1173
1174static int aead_encrypt(struct aead_request *req)
1175{
1176	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1177	return aead_perform(req, 1, req->assoclen + ivsize,
1178			req->cryptlen, req->iv);
1179}
1180
1181static int aead_decrypt(struct aead_request *req)
1182{
1183	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
1184	return aead_perform(req, 0, req->assoclen + ivsize,
1185			req->cryptlen, req->iv);
1186}
1187
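/*
 * IV generation, loosely modelled on eseqiv: a per-tfm random salt is
 * generated on first use and used as the inner IV, while the IV returned
 * to the caller (req->giv) is zero-padded and carries the big-endian
 * request sequence number in its trailing bytes.
 */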
1188static int aead_givencrypt(struct aead_givcrypt_request *req)
1189{
1190	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
1191	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
1192	unsigned len, ivsize = crypto_aead_ivsize(tfm);
1193	__be64 seq;
1194
1195	/* copied from eseqiv.c */
1196	if (!ctx->salted) {
1197		get_random_bytes(ctx->salt, ivsize);
1198		ctx->salted = 1;
1199	}
1200	memcpy(req->areq.iv, ctx->salt, ivsize);
1201	len = ivsize;
1202	if (ivsize > sizeof(u64)) {
1203		memset(req->giv, 0, ivsize - sizeof(u64));
1204		len = sizeof(u64);
1205	}
1206	seq = cpu_to_be64(req->seq);
1207	memcpy(req->giv + ivsize - len, &seq, len);
1208	return aead_perform(&req->areq, 1, req->areq.assoclen,
 1209			req->areq.cryptlen + ivsize, req->giv);
1210}
1211
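/*
 * Algorithm table: each entry pairs a crypto API template with the NPE
 * mode words for it.  cfg_enc/cfg_dec combine a direction (CIPH_ENCR or
 * CIPH_DECR), a cipher (MOD_DES/MOD_3DES/MOD_AES), a block mode (MOD_ECB,
 * MOD_CBC_*, MOD_CTR) and a key-length code.  The AES entries leave the
 * key-length field for setup_cipher() to fill in from the actual key, and
 * the CTR entries use CIPH_ENCR in both directions because CTR decryption
 * is the same keystream operation as encryption.
 */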
1212static struct ixp_alg ixp4xx_algos[] = {
1213{
1214	.crypto	= {
1215		.cra_name	= "cbc(des)",
1216		.cra_blocksize	= DES_BLOCK_SIZE,
1217		.cra_u		= { .ablkcipher = {
1218			.min_keysize	= DES_KEY_SIZE,
1219			.max_keysize	= DES_KEY_SIZE,
1220			.ivsize		= DES_BLOCK_SIZE,
1221			.geniv		= "eseqiv",
1222			}
1223		}
1224	},
1225	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1226	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1227
1228}, {
1229	.crypto	= {
1230		.cra_name	= "ecb(des)",
1231		.cra_blocksize	= DES_BLOCK_SIZE,
1232		.cra_u		= { .ablkcipher = {
1233			.min_keysize	= DES_KEY_SIZE,
1234			.max_keysize	= DES_KEY_SIZE,
1235			}
1236		}
1237	},
1238	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
1239	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
1240}, {
1241	.crypto	= {
1242		.cra_name	= "cbc(des3_ede)",
1243		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1244		.cra_u		= { .ablkcipher = {
1245			.min_keysize	= DES3_EDE_KEY_SIZE,
1246			.max_keysize	= DES3_EDE_KEY_SIZE,
1247			.ivsize		= DES3_EDE_BLOCK_SIZE,
1248			.geniv		= "eseqiv",
1249			}
1250		}
1251	},
1252	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1253	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1254}, {
1255	.crypto	= {
1256		.cra_name	= "ecb(des3_ede)",
1257		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1258		.cra_u		= { .ablkcipher = {
1259			.min_keysize	= DES3_EDE_KEY_SIZE,
1260			.max_keysize	= DES3_EDE_KEY_SIZE,
1261			}
1262		}
1263	},
1264	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
1265	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
1266}, {
1267	.crypto	= {
1268		.cra_name	= "cbc(aes)",
1269		.cra_blocksize	= AES_BLOCK_SIZE,
1270		.cra_u		= { .ablkcipher = {
1271			.min_keysize	= AES_MIN_KEY_SIZE,
1272			.max_keysize	= AES_MAX_KEY_SIZE,
1273			.ivsize		= AES_BLOCK_SIZE,
1274			.geniv		= "eseqiv",
1275			}
1276		}
1277	},
1278	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1279	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1280}, {
1281	.crypto	= {
1282		.cra_name	= "ecb(aes)",
1283		.cra_blocksize	= AES_BLOCK_SIZE,
1284		.cra_u		= { .ablkcipher = {
1285			.min_keysize	= AES_MIN_KEY_SIZE,
1286			.max_keysize	= AES_MAX_KEY_SIZE,
1287			}
1288		}
1289	},
1290	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
1291	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
1292}, {
1293	.crypto	= {
1294		.cra_name	= "ctr(aes)",
1295		.cra_blocksize	= AES_BLOCK_SIZE,
1296		.cra_u		= { .ablkcipher = {
1297			.min_keysize	= AES_MIN_KEY_SIZE,
1298			.max_keysize	= AES_MAX_KEY_SIZE,
1299			.ivsize		= AES_BLOCK_SIZE,
1300			.geniv		= "eseqiv",
1301			}
1302		}
1303	},
1304	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1305	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1306}, {
1307	.crypto	= {
1308		.cra_name	= "rfc3686(ctr(aes))",
1309		.cra_blocksize	= AES_BLOCK_SIZE,
1310		.cra_u		= { .ablkcipher = {
1311			.min_keysize	= AES_MIN_KEY_SIZE,
1312			.max_keysize	= AES_MAX_KEY_SIZE,
1313			.ivsize		= AES_BLOCK_SIZE,
1314			.geniv		= "eseqiv",
1315			.setkey		= ablk_rfc3686_setkey,
1316			.encrypt	= ablk_rfc3686_crypt,
1317			.decrypt	= ablk_rfc3686_crypt }
1318		}
1319	},
1320	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
1321	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
1322}, {
1323	.crypto	= {
1324		.cra_name	= "authenc(hmac(md5),cbc(des))",
1325		.cra_blocksize	= DES_BLOCK_SIZE,
1326		.cra_u		= { .aead = {
1327			.ivsize		= DES_BLOCK_SIZE,
1328			.maxauthsize	= MD5_DIGEST_SIZE,
1329			}
1330		}
1331	},
1332	.hash = &hash_alg_md5,
1333	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1334	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1335}, {
1336	.crypto	= {
1337		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
1338		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1339		.cra_u		= { .aead = {
1340			.ivsize		= DES3_EDE_BLOCK_SIZE,
1341			.maxauthsize	= MD5_DIGEST_SIZE,
1342			}
1343		}
1344	},
1345	.hash = &hash_alg_md5,
1346	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1347	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1348}, {
1349	.crypto	= {
1350		.cra_name	= "authenc(hmac(sha1),cbc(des))",
1351		.cra_blocksize	= DES_BLOCK_SIZE,
1352		.cra_u		= { .aead = {
1353			.ivsize		= DES_BLOCK_SIZE,
1354			.maxauthsize	= SHA1_DIGEST_SIZE,
1355			}
1356		}
1357	},
1358	.hash = &hash_alg_sha1,
1359	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
1360	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
1361}, {
1362	.crypto	= {
1363		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
1364		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
1365		.cra_u		= { .aead = {
1366			.ivsize		= DES3_EDE_BLOCK_SIZE,
1367			.maxauthsize	= SHA1_DIGEST_SIZE,
1368			}
1369		}
1370	},
1371	.hash = &hash_alg_sha1,
1372	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
1373	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
1374}, {
1375	.crypto	= {
1376		.cra_name	= "authenc(hmac(md5),cbc(aes))",
1377		.cra_blocksize	= AES_BLOCK_SIZE,
1378		.cra_u		= { .aead = {
1379			.ivsize		= AES_BLOCK_SIZE,
1380			.maxauthsize	= MD5_DIGEST_SIZE,
1381			}
1382		}
1383	},
1384	.hash = &hash_alg_md5,
1385	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1386	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1387}, {
1388	.crypto	= {
1389		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
1390		.cra_blocksize	= AES_BLOCK_SIZE,
1391		.cra_u		= { .aead = {
1392			.ivsize		= AES_BLOCK_SIZE,
1393			.maxauthsize	= SHA1_DIGEST_SIZE,
1394			}
1395		}
1396	},
1397	.hash = &hash_alg_sha1,
1398	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
1399	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
1400} };
1401
1402#define IXP_POSTFIX "-ixp4xx"
1403
1404static const struct platform_device_info ixp_dev_info __initdata = {
1405	.name		= DRIVER_NAME,
1406	.id		= 0,
1407	.dma_mask	= DMA_BIT_MASK(32),
1408};
1409
1410static int __init ixp_module_init(void)
1411{
1412	int num = ARRAY_SIZE(ixp4xx_algos);
1413	int i, err;
1414
1415	pdev = platform_device_register_full(&ixp_dev_info);
1416	if (IS_ERR(pdev))
1417		return PTR_ERR(pdev);
1418
1419	spin_lock_init(&desc_lock);
1420	spin_lock_init(&emerg_lock);
1421
1422	err = init_ixp_crypto(&pdev->dev);
1423	if (err) {
1424		platform_device_unregister(pdev);
1425		return err;
1426	}
 1427	for (i = 0; i < num; i++) {
1428		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;
1429
1430		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
1431			"%s"IXP_POSTFIX, cra->cra_name) >=
1432			CRYPTO_MAX_ALG_NAME)
1433		{
1434			continue;
1435		}
1436		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
1437			continue;
1438		}
1439		if (!ixp4xx_algos[i].hash) {
1440			/* block ciphers */
1441			cra->cra_type = &crypto_ablkcipher_type;
1442			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1443					 CRYPTO_ALG_KERN_DRIVER_ONLY |
1444					 CRYPTO_ALG_ASYNC;
1445			if (!cra->cra_ablkcipher.setkey)
1446				cra->cra_ablkcipher.setkey = ablk_setkey;
1447			if (!cra->cra_ablkcipher.encrypt)
1448				cra->cra_ablkcipher.encrypt = ablk_encrypt;
1449			if (!cra->cra_ablkcipher.decrypt)
1450				cra->cra_ablkcipher.decrypt = ablk_decrypt;
1451			cra->cra_init = init_tfm_ablk;
1452		} else {
1453			/* authenc */
1454			cra->cra_type = &crypto_aead_type;
1455			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
1456					 CRYPTO_ALG_KERN_DRIVER_ONLY |
1457					 CRYPTO_ALG_ASYNC;
1458			cra->cra_aead.setkey = aead_setkey;
1459			cra->cra_aead.setauthsize = aead_setauthsize;
1460			cra->cra_aead.encrypt = aead_encrypt;
1461			cra->cra_aead.decrypt = aead_decrypt;
1462			cra->cra_aead.givencrypt = aead_givencrypt;
1463			cra->cra_init = init_tfm_aead;
1464		}
1465		cra->cra_ctxsize = sizeof(struct ixp_ctx);
1466		cra->cra_module = THIS_MODULE;
1467		cra->cra_alignmask = 3;
1468		cra->cra_priority = 300;
1469		cra->cra_exit = exit_tfm;
1470		if (crypto_register_alg(cra))
1471			printk(KERN_ERR "Failed to register '%s'\n",
1472				cra->cra_name);
1473		else
1474			ixp4xx_algos[i].registered = 1;
1475	}
1476	return 0;
1477}
1478
1479static void __exit ixp_module_exit(void)
1480{
1481	int num = ARRAY_SIZE(ixp4xx_algos);
1482	int i;
1483
 1484	for (i = 0; i < num; i++) {
1485		if (ixp4xx_algos[i].registered)
1486			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
1487	}
1488	release_ixp_crypto(&pdev->dev);
1489	platform_device_unregister(pdev);
1490}
1491
1492module_init(ixp_module_init);
1493module_exit(ixp_module_exit);
1494
1495MODULE_LICENSE("GPL");
1496MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
1497MODULE_DESCRIPTION("IXP4xx hardware crypto");
1498