   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <crypto/algapi.h>
   7#include <crypto/internal/aead.h>
   8#include <crypto/authenc.h>
   9#include <crypto/gcm.h>
  10#include <linux/rtnetlink.h>
  11#include <crypto/internal/des.h>
  12#include "cc_driver.h"
  13#include "cc_buffer_mgr.h"
  14#include "cc_aead.h"
  15#include "cc_request_mgr.h"
  16#include "cc_hash.h"
  17#include "cc_sram_mgr.h"
  18
  19#define template_aead	template_u.aead
  20
  21#define MAX_AEAD_SETKEY_SEQ 12
  22#define MAX_AEAD_PROCESS_SEQ 23
  23
  24#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
  25#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
  26
  27#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
  28
  29struct cc_aead_handle {
  30	u32 sram_workspace_addr;
  31	struct list_head aead_list;
  32};
  33
  34struct cc_hmac_s {
  35	u8 *padded_authkey;
  36	u8 *ipad_opad; /* IPAD, OPAD*/
  37	dma_addr_t padded_authkey_dma_addr;
  38	dma_addr_t ipad_opad_dma_addr;
  39};
  40
  41struct cc_xcbc_s {
  42	u8 *xcbc_keys; /* K1,K2,K3 */
  43	dma_addr_t xcbc_keys_dma_addr;
  44};
  45
  46struct cc_aead_ctx {
  47	struct cc_drvdata *drvdata;
  48	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
  49	u8 *enckey;
  50	dma_addr_t enckey_dma_addr;
  51	union {
  52		struct cc_hmac_s hmac;
  53		struct cc_xcbc_s xcbc;
  54	} auth_state;
  55	unsigned int enc_keylen;
  56	unsigned int auth_keylen;
   57	unsigned int authsize; /* Actual (reduced?) size of the MAC/ICV */
  58	unsigned int hash_len;
  59	enum drv_cipher_mode cipher_mode;
  60	enum cc_flow_mode flow_mode;
  61	enum drv_hash_mode auth_mode;
  62};
  63
  64static void cc_aead_exit(struct crypto_aead *tfm)
  65{
  66	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  67	struct device *dev = drvdata_to_dev(ctx->drvdata);
  68
  69	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
  70		crypto_tfm_alg_name(&tfm->base));
  71
  72	/* Unmap enckey buffer */
  73	if (ctx->enckey) {
  74		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
  75				  ctx->enckey_dma_addr);
  76		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
  77			&ctx->enckey_dma_addr);
  78		ctx->enckey_dma_addr = 0;
  79		ctx->enckey = NULL;
  80	}
  81
   82	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
  83		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
  84
  85		if (xcbc->xcbc_keys) {
  86			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
  87					  xcbc->xcbc_keys,
  88					  xcbc->xcbc_keys_dma_addr);
  89		}
  90		dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
  91			&xcbc->xcbc_keys_dma_addr);
  92		xcbc->xcbc_keys_dma_addr = 0;
  93		xcbc->xcbc_keys = NULL;
  94	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
  95		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
  96
  97		if (hmac->ipad_opad) {
  98			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
  99					  hmac->ipad_opad,
 100					  hmac->ipad_opad_dma_addr);
 101			dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
 102				&hmac->ipad_opad_dma_addr);
 103			hmac->ipad_opad_dma_addr = 0;
 104			hmac->ipad_opad = NULL;
 105		}
 106		if (hmac->padded_authkey) {
 107			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
 108					  hmac->padded_authkey,
 109					  hmac->padded_authkey_dma_addr);
 110			dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
 111				&hmac->padded_authkey_dma_addr);
 112			hmac->padded_authkey_dma_addr = 0;
 113			hmac->padded_authkey = NULL;
 114		}
 115	}
 116}
 117
 118static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
 119{
 120	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 121
 122	return cc_get_default_hash_len(ctx->drvdata);
 123}
 124
 125static int cc_aead_init(struct crypto_aead *tfm)
 126{
 127	struct aead_alg *alg = crypto_aead_alg(tfm);
 128	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 129	struct cc_crypto_alg *cc_alg =
 130			container_of(alg, struct cc_crypto_alg, aead_alg);
 131	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
 132
 133	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
 134		crypto_tfm_alg_name(&tfm->base));
 135
 136	/* Initialize modes in instance */
 137	ctx->cipher_mode = cc_alg->cipher_mode;
 138	ctx->flow_mode = cc_alg->flow_mode;
 139	ctx->auth_mode = cc_alg->auth_mode;
 140	ctx->drvdata = cc_alg->drvdata;
 141	crypto_aead_set_reqsize_dma(tfm, sizeof(struct aead_req_ctx));
 142
 143	/* Allocate key buffer, cache line aligned */
 144	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
 145					 &ctx->enckey_dma_addr, GFP_KERNEL);
 146	if (!ctx->enckey) {
 147		dev_err(dev, "Failed allocating key buffer\n");
 148		goto init_failed;
 149	}
 150	dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
 151		ctx->enckey);
 152
 153	/* Set default authlen value */
 154
  155	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
 156		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
 157		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
 158
 159		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
 160		/* (and temporary for user key - up to 256b) */
 161		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
 162						     &xcbc->xcbc_keys_dma_addr,
 163						     GFP_KERNEL);
 164		if (!xcbc->xcbc_keys) {
 165			dev_err(dev, "Failed allocating buffer for XCBC keys\n");
 166			goto init_failed;
 167		}
 168	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
 169		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
 170		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
 171		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
 172
 173		/* Allocate dma-coherent buffer for IPAD + OPAD */
 174		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
 175						     &hmac->ipad_opad_dma_addr,
 176						     GFP_KERNEL);
 177
 178		if (!hmac->ipad_opad) {
 179			dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
 180			goto init_failed;
 181		}
 182
 183		dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
 184			hmac->ipad_opad);
 185
 186		hmac->padded_authkey = dma_alloc_coherent(dev,
 187							  MAX_HMAC_BLOCK_SIZE,
 188							  pkey_dma,
 189							  GFP_KERNEL);
 190
 191		if (!hmac->padded_authkey) {
 192			dev_err(dev, "failed to allocate padded_authkey\n");
 193			goto init_failed;
 194		}
 195	} else {
 196		ctx->auth_state.hmac.ipad_opad = NULL;
 197		ctx->auth_state.hmac.padded_authkey = NULL;
 198	}
 199	ctx->hash_len = cc_get_aead_hash_len(tfm);
 200
 201	return 0;
 202
 203init_failed:
 204	cc_aead_exit(tfm);
 205	return -ENOMEM;
 206}
 207
 208static void cc_aead_complete(struct device *dev, void *cc_req, int err)
 209{
 210	struct aead_request *areq = (struct aead_request *)cc_req;
 211	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
 212	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
 213	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 214
 215	/* BACKLOG notification */
 216	if (err == -EINPROGRESS)
 217		goto done;
 218
 219	cc_unmap_aead_request(dev, areq);
 220
 221	/* Restore ordinary iv pointer */
 222	areq->iv = areq_ctx->backup_iv;
 223
 224	if (err)
 225		goto done;
 226
 227	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
 228		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
 229			   ctx->authsize) != 0) {
 230			dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
 231				ctx->authsize, ctx->cipher_mode);
  232			/* In case of payload authentication failure, the decrypted
  233			 * message MUST NOT be revealed --> zero its memory.
  234			 */
 235			sg_zero_buffer(areq->dst, sg_nents(areq->dst),
 236				       areq->cryptlen, areq->assoclen);
 237			err = -EBADMSG;
 238		}
 239	/*ENCRYPT*/
 240	} else if (areq_ctx->is_icv_fragmented) {
 241		u32 skip = areq->cryptlen + areq_ctx->dst_offset;
 242
 243		cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
 244				   skip, (skip + ctx->authsize),
 245				   CC_SG_FROM_BUF);
 246	}
 247done:
 248	aead_request_complete(areq, err);
 249}
 250
 251static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
 252				struct cc_aead_ctx *ctx)
 253{
 254	/* Load the AES key */
 255	hw_desc_init(&desc[0]);
  256	/* We use the same buffer for the source/user key as for the
  257	 * output keys, because after this key loading it is not needed
  258	 * anymore.
  259	 */
 260	set_din_type(&desc[0], DMA_DLLI,
 261		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
 262		     NS_BIT);
 263	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
 264	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
 265	set_key_size_aes(&desc[0], ctx->auth_keylen);
 266	set_flow_mode(&desc[0], S_DIN_to_AES);
 267	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
 268
 269	hw_desc_init(&desc[1]);
 270	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
 271	set_flow_mode(&desc[1], DIN_AES_DOUT);
 272	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
 273		      AES_KEYSIZE_128, NS_BIT, 0);
 274
 275	hw_desc_init(&desc[2]);
 276	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
 277	set_flow_mode(&desc[2], DIN_AES_DOUT);
 278	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
 279					 + AES_KEYSIZE_128),
 280			      AES_KEYSIZE_128, NS_BIT, 0);
 281
 282	hw_desc_init(&desc[3]);
 283	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
 284	set_flow_mode(&desc[3], DIN_AES_DOUT);
 285	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
 286					  + 2 * AES_KEYSIZE_128),
 287			      AES_KEYSIZE_128, NS_BIT, 0);
 288
 289	return 4;
 290}
 291
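/*
 * Build the descriptor sequence that derives the HMAC ipad/opad
 * intermediate digests from the padded authentication key and stores
 * them in the ipad_opad buffer; returns the number of descriptors used.
 */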
 292static unsigned int hmac_setkey(struct cc_hw_desc *desc,
 293				struct cc_aead_ctx *ctx)
 294{
 295	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 296	unsigned int digest_ofs = 0;
 297	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 298			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 299	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 300			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
 301	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
 302
 303	unsigned int idx = 0;
 304	int i;
 305
 306	/* calc derived HMAC key */
 307	for (i = 0; i < 2; i++) {
 308		/* Load hash initial state */
 309		hw_desc_init(&desc[idx]);
 310		set_cipher_mode(&desc[idx], hash_mode);
 311		set_din_sram(&desc[idx],
 312			     cc_larval_digest_addr(ctx->drvdata,
 313						   ctx->auth_mode),
 314			     digest_size);
 315		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 316		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 317		idx++;
 318
 319		/* Load the hash current length*/
 320		hw_desc_init(&desc[idx]);
 321		set_cipher_mode(&desc[idx], hash_mode);
 322		set_din_const(&desc[idx], 0, ctx->hash_len);
 323		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 324		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 325		idx++;
 326
 327		/* Prepare ipad key */
 328		hw_desc_init(&desc[idx]);
 329		set_xor_val(&desc[idx], hmac_pad_const[i]);
 330		set_cipher_mode(&desc[idx], hash_mode);
 331		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 332		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 333		idx++;
 334
 335		/* Perform HASH update */
 336		hw_desc_init(&desc[idx]);
 337		set_din_type(&desc[idx], DMA_DLLI,
 338			     hmac->padded_authkey_dma_addr,
 339			     SHA256_BLOCK_SIZE, NS_BIT);
 340		set_cipher_mode(&desc[idx], hash_mode);
 341		set_xor_active(&desc[idx]);
 342		set_flow_mode(&desc[idx], DIN_HASH);
 343		idx++;
 344
  345		/* Get the digest */
 346		hw_desc_init(&desc[idx]);
 347		set_cipher_mode(&desc[idx], hash_mode);
 348		set_dout_dlli(&desc[idx],
 349			      (hmac->ipad_opad_dma_addr + digest_ofs),
 350			      digest_size, NS_BIT, 0);
 351		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 352		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 353		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 354		idx++;
 355
 356		digest_ofs += digest_size;
 357	}
 358
 359	return idx;
 360}
 361
 362static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 363{
 364	struct device *dev = drvdata_to_dev(ctx->drvdata);
 365
 366	dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
 367		ctx->enc_keylen, ctx->auth_keylen);
 368
 369	switch (ctx->auth_mode) {
 370	case DRV_HASH_SHA1:
 371	case DRV_HASH_SHA256:
 372		break;
 373	case DRV_HASH_XCBC_MAC:
 374		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
 375		    ctx->auth_keylen != AES_KEYSIZE_192 &&
 376		    ctx->auth_keylen != AES_KEYSIZE_256)
 377			return -ENOTSUPP;
 378		break;
  379	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
 380		if (ctx->auth_keylen > 0)
 381			return -EINVAL;
 382		break;
 383	default:
 384		dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
 385		return -EINVAL;
 386	}
 387	/* Check cipher key size */
 388	if (ctx->flow_mode == S_DIN_to_DES) {
 389		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
 390			dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
 391				ctx->enc_keylen);
 392			return -EINVAL;
 393		}
 394	} else { /* Default assumed to be AES ciphers */
 395		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
 396		    ctx->enc_keylen != AES_KEYSIZE_192 &&
 397		    ctx->enc_keylen != AES_KEYSIZE_256) {
 398			dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
 399				ctx->enc_keylen);
 400			return -EINVAL;
 401		}
 402	}
 403
 404	return 0; /* All tests of keys sizes passed */
 405}
 406
  407/* This function prepares the user key so it can be passed to the hmac processing
  408 * (copy to internal buffer or hash in case of key longer than block size).
  409 */
 410static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
 411				 unsigned int keylen)
 412{
 413	dma_addr_t key_dma_addr = 0;
 414	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 415	struct device *dev = drvdata_to_dev(ctx->drvdata);
 416	u32 larval_addr;
 417	struct cc_crypto_req cc_req = {};
 418	unsigned int blocksize;
 419	unsigned int digestsize;
 420	unsigned int hashmode;
 421	unsigned int idx = 0;
 422	int rc = 0;
 423	u8 *key = NULL;
 424	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 425	dma_addr_t padded_authkey_dma_addr =
 426		ctx->auth_state.hmac.padded_authkey_dma_addr;
 427
 428	switch (ctx->auth_mode) { /* auth_key required and >0 */
 429	case DRV_HASH_SHA1:
 430		blocksize = SHA1_BLOCK_SIZE;
 431		digestsize = SHA1_DIGEST_SIZE;
 432		hashmode = DRV_HASH_HW_SHA1;
 433		break;
 434	case DRV_HASH_SHA256:
 435	default:
 436		blocksize = SHA256_BLOCK_SIZE;
 437		digestsize = SHA256_DIGEST_SIZE;
 438		hashmode = DRV_HASH_HW_SHA256;
 439	}
 440
 441	if (keylen != 0) {
 442
 443		key = kmemdup(authkey, keylen, GFP_KERNEL);
 444		if (!key)
 445			return -ENOMEM;
 446
 447		key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
 448		if (dma_mapping_error(dev, key_dma_addr)) {
 449			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 450				key, keylen);
 451			kfree_sensitive(key);
 452			return -ENOMEM;
 453		}
 454		if (keylen > blocksize) {
 455			/* Load hash initial state */
 456			hw_desc_init(&desc[idx]);
 457			set_cipher_mode(&desc[idx], hashmode);
 458			larval_addr = cc_larval_digest_addr(ctx->drvdata,
 459							    ctx->auth_mode);
 460			set_din_sram(&desc[idx], larval_addr, digestsize);
 461			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 462			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 463			idx++;
 464
 465			/* Load the hash current length*/
 466			hw_desc_init(&desc[idx]);
 467			set_cipher_mode(&desc[idx], hashmode);
 468			set_din_const(&desc[idx], 0, ctx->hash_len);
 469			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 470			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 471			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 472			idx++;
 473
 474			hw_desc_init(&desc[idx]);
 475			set_din_type(&desc[idx], DMA_DLLI,
 476				     key_dma_addr, keylen, NS_BIT);
 477			set_flow_mode(&desc[idx], DIN_HASH);
 478			idx++;
 479
 480			/* Get hashed key */
 481			hw_desc_init(&desc[idx]);
 482			set_cipher_mode(&desc[idx], hashmode);
 483			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 484				      digestsize, NS_BIT, 0);
 485			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 486			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 487			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 488			set_cipher_config0(&desc[idx],
 489					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 490			idx++;
 491
 492			hw_desc_init(&desc[idx]);
 493			set_din_const(&desc[idx], 0, (blocksize - digestsize));
 494			set_flow_mode(&desc[idx], BYPASS);
 495			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
 496				      digestsize), (blocksize - digestsize),
 497				      NS_BIT, 0);
 498			idx++;
 499		} else {
 500			hw_desc_init(&desc[idx]);
 501			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
 502				     keylen, NS_BIT);
 503			set_flow_mode(&desc[idx], BYPASS);
 504			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 505				      keylen, NS_BIT, 0);
 506			idx++;
 507
 508			if ((blocksize - keylen) != 0) {
 509				hw_desc_init(&desc[idx]);
 510				set_din_const(&desc[idx], 0,
 511					      (blocksize - keylen));
 512				set_flow_mode(&desc[idx], BYPASS);
 513				set_dout_dlli(&desc[idx],
 514					      (padded_authkey_dma_addr +
 515					       keylen),
 516					      (blocksize - keylen), NS_BIT, 0);
 517				idx++;
 518			}
 519		}
 520	} else {
 521		hw_desc_init(&desc[idx]);
 522		set_din_const(&desc[idx], 0, (blocksize - keylen));
 523		set_flow_mode(&desc[idx], BYPASS);
 524		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 525			      blocksize, NS_BIT, 0);
 526		idx++;
 527	}
 528
 529	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 530	if (rc)
 531		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 532
 533	if (key_dma_addr)
 534		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 535
 536	kfree_sensitive(key);
 537
 538	return rc;
 539}
 540
 541static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 542			  unsigned int keylen)
 543{
 544	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 545	struct cc_crypto_req cc_req = {};
 546	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 547	unsigned int seq_len = 0;
 548	struct device *dev = drvdata_to_dev(ctx->drvdata);
 549	const u8 *enckey, *authkey;
 550	int rc;
 551
 552	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
 553		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
 554
 555	/* STAT_PHASE_0: Init and sanity checks */
 556
 557	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
 558		struct crypto_authenc_keys keys;
 559
 560		rc = crypto_authenc_extractkeys(&keys, key, keylen);
 561		if (rc)
 562			return rc;
 563		enckey = keys.enckey;
 564		authkey = keys.authkey;
 565		ctx->enc_keylen = keys.enckeylen;
 566		ctx->auth_keylen = keys.authkeylen;
 567
 568		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
  569			/* the nonce is stored in the last bytes of the key */
 570			if (ctx->enc_keylen <
 571			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
 572				return -EINVAL;
 573			/* Copy nonce from last 4 bytes in CTR key to
 574			 *  first 4 bytes in CTR IV
 575			 */
 576			memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
 577			       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
 578			/* Set CTR key size */
 579			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 580		}
 581	} else { /* non-authenc - has just one key */
 582		enckey = key;
 583		authkey = NULL;
 584		ctx->enc_keylen = keylen;
 585		ctx->auth_keylen = 0;
 586	}
 587
 588	rc = validate_keys_sizes(ctx);
 589	if (rc)
 590		return rc;
 591
 592	/* STAT_PHASE_1: Copy key to ctx */
 593
 594	/* Get key material */
 595	memcpy(ctx->enckey, enckey, ctx->enc_keylen);
 596	if (ctx->enc_keylen == 24)
 597		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
 598	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 599		memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
 600		       ctx->auth_keylen);
 601	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
 602		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
 603		if (rc)
 604			return rc;
 605	}
 606
 607	/* STAT_PHASE_2: Create sequence */
 608
 609	switch (ctx->auth_mode) {
 610	case DRV_HASH_SHA1:
 611	case DRV_HASH_SHA256:
 612		seq_len = hmac_setkey(desc, ctx);
 613		break;
 614	case DRV_HASH_XCBC_MAC:
 615		seq_len = xcbc_setkey(desc, ctx);
 616		break;
 617	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
 618		break; /* No auth. key setup */
 619	default:
 620		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
 621		return -ENOTSUPP;
 622	}
 623
 624	/* STAT_PHASE_3: Submit sequence to HW */
 625
 626	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
 627		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
 628		if (rc) {
 629			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 630			return rc;
 631		}
 632	}
 633
 634	/* Update STAT_PHASE_3 */
 635	return rc;
 636}
 637
 638static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
 639			       unsigned int keylen)
 640{
 641	struct crypto_authenc_keys keys;
 642	int err;
 643
 644	err = crypto_authenc_extractkeys(&keys, key, keylen);
 645	if (unlikely(err))
 646		return err;
 647
 648	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
 649	      cc_aead_setkey(aead, key, keylen);
 650
 651	memzero_explicit(&keys, sizeof(keys));
 652	return err;
 653}
 654
 655static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
 656				 unsigned int keylen)
 657{
 658	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 659
 660	if (keylen < 3)
 661		return -EINVAL;
 662
 663	keylen -= 3;
 664	memcpy(ctx->ctr_nonce, key + keylen, 3);
 665
 666	return cc_aead_setkey(tfm, key, keylen);
 667}
 668
 669static int cc_aead_setauthsize(struct crypto_aead *authenc,
 670			       unsigned int authsize)
 671{
 672	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
 673	struct device *dev = drvdata_to_dev(ctx->drvdata);
 674
 675	/* Unsupported auth. sizes */
 676	if (authsize == 0 ||
 677	    authsize > crypto_aead_maxauthsize(authenc)) {
 678		return -ENOTSUPP;
 679	}
 680
 681	ctx->authsize = authsize;
 682	dev_dbg(dev, "authlen=%d\n", ctx->authsize);
 683
 684	return 0;
 685}
 686
 687static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
 688				      unsigned int authsize)
 689{
 690	switch (authsize) {
 691	case 8:
 692	case 12:
 693	case 16:
 694		break;
 695	default:
 696		return -EINVAL;
 697	}
 698
 699	return cc_aead_setauthsize(authenc, authsize);
 700}
 701
 702static int cc_ccm_setauthsize(struct crypto_aead *authenc,
 703			      unsigned int authsize)
 704{
 705	switch (authsize) {
 706	case 4:
 707	case 6:
 708	case 8:
 709	case 10:
 710	case 12:
 711	case 14:
 712	case 16:
 713		break;
 714	default:
 715		return -EINVAL;
 716	}
 717
 718	return cc_aead_setauthsize(authenc, authsize);
 719}
 720
 721static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
 722			      struct cc_hw_desc desc[], unsigned int *seq_size)
 723{
 724	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 725	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 726	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
 727	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
 728	unsigned int idx = *seq_size;
 729	struct device *dev = drvdata_to_dev(ctx->drvdata);
 730
 731	switch (assoc_dma_type) {
 732	case CC_DMA_BUF_DLLI:
 733		dev_dbg(dev, "ASSOC buffer type DLLI\n");
 734		hw_desc_init(&desc[idx]);
 735		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
 736			     areq_ctx->assoclen, NS_BIT);
 737		set_flow_mode(&desc[idx], flow_mode);
 738		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
 739		    areq_ctx->cryptlen > 0)
 740			set_din_not_last_indication(&desc[idx]);
 741		break;
 742	case CC_DMA_BUF_MLLI:
 743		dev_dbg(dev, "ASSOC buffer type MLLI\n");
 744		hw_desc_init(&desc[idx]);
 745		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
 746			     areq_ctx->assoc.mlli_nents, NS_BIT);
 747		set_flow_mode(&desc[idx], flow_mode);
 748		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
 749		    areq_ctx->cryptlen > 0)
 750			set_din_not_last_indication(&desc[idx]);
 751		break;
 752	case CC_DMA_BUF_NULL:
 753	default:
 754		dev_err(dev, "Invalid ASSOC buffer type\n");
 755	}
 756
 757	*seq_size = (++idx);
 758}
 759
 760static void cc_proc_authen_desc(struct aead_request *areq,
 761				unsigned int flow_mode,
 762				struct cc_hw_desc desc[],
 763				unsigned int *seq_size, int direct)
 764{
 765	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
 766	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
 767	unsigned int idx = *seq_size;
 768	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 769	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 770	struct device *dev = drvdata_to_dev(ctx->drvdata);
 771
 772	switch (data_dma_type) {
 773	case CC_DMA_BUF_DLLI:
 774	{
 775		struct scatterlist *cipher =
 776			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 777			areq_ctx->dst_sgl : areq_ctx->src_sgl;
 778
 779		unsigned int offset =
 780			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 781			areq_ctx->dst_offset : areq_ctx->src_offset;
 782		dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
 783		hw_desc_init(&desc[idx]);
 784		set_din_type(&desc[idx], DMA_DLLI,
 785			     (sg_dma_address(cipher) + offset),
 786			     areq_ctx->cryptlen, NS_BIT);
 787		set_flow_mode(&desc[idx], flow_mode);
 788		break;
 789	}
 790	case CC_DMA_BUF_MLLI:
 791	{
  792		/* DOUBLE-PASS flow (as default)
  793		 * assoc. + iv + data - compacted in one table;
  794		 * if assoclen is ZERO, only the IV is processed.
  795		 */
 796		u32 mlli_addr = areq_ctx->assoc.sram_addr;
 797		u32 mlli_nents = areq_ctx->assoc.mlli_nents;
 798
 799		if (areq_ctx->is_single_pass) {
 800			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 801				mlli_addr = areq_ctx->dst.sram_addr;
 802				mlli_nents = areq_ctx->dst.mlli_nents;
 803			} else {
 804				mlli_addr = areq_ctx->src.sram_addr;
 805				mlli_nents = areq_ctx->src.mlli_nents;
 806			}
 807		}
 808
 809		dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
 810		hw_desc_init(&desc[idx]);
 811		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
 812			     NS_BIT);
 813		set_flow_mode(&desc[idx], flow_mode);
 814		break;
 815	}
 816	case CC_DMA_BUF_NULL:
 817	default:
 818		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
 819	}
 820
 821	*seq_size = (++idx);
 822}
 823
 824static void cc_proc_cipher_desc(struct aead_request *areq,
 825				unsigned int flow_mode,
 826				struct cc_hw_desc desc[],
 827				unsigned int *seq_size)
 828{
 829	unsigned int idx = *seq_size;
 830	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
 831	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
 832	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 833	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 834	struct device *dev = drvdata_to_dev(ctx->drvdata);
 835
 836	if (areq_ctx->cryptlen == 0)
 837		return; /*null processing*/
 838
 839	switch (data_dma_type) {
 840	case CC_DMA_BUF_DLLI:
 841		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
 842		hw_desc_init(&desc[idx]);
 843		set_din_type(&desc[idx], DMA_DLLI,
 844			     (sg_dma_address(areq_ctx->src_sgl) +
 845			      areq_ctx->src_offset), areq_ctx->cryptlen,
 846			      NS_BIT);
 847		set_dout_dlli(&desc[idx],
 848			      (sg_dma_address(areq_ctx->dst_sgl) +
 849			       areq_ctx->dst_offset),
 850			      areq_ctx->cryptlen, NS_BIT, 0);
 851		set_flow_mode(&desc[idx], flow_mode);
 852		break;
 853	case CC_DMA_BUF_MLLI:
 854		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
 855		hw_desc_init(&desc[idx]);
 856		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
 857			     areq_ctx->src.mlli_nents, NS_BIT);
 858		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
 859			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
 860		set_flow_mode(&desc[idx], flow_mode);
 861		break;
 862	case CC_DMA_BUF_NULL:
 863	default:
 864		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
 865	}
 866
 867	*seq_size = (++idx);
 868}
 869
 870static void cc_proc_digest_desc(struct aead_request *req,
 871				struct cc_hw_desc desc[],
 872				unsigned int *seq_size)
 873{
 874	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 875	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 876	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
 877	unsigned int idx = *seq_size;
 878	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 879				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 880	int direct = req_ctx->gen_ctx.op_type;
 881
 882	/* Get final ICV result */
 883	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 884		hw_desc_init(&desc[idx]);
 885		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 886		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 887		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
 888			      NS_BIT, 1);
 889		set_queue_last_ind(ctx->drvdata, &desc[idx]);
 890		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 891			set_aes_not_hash_mode(&desc[idx]);
 892			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
 893		} else {
 894			set_cipher_config0(&desc[idx],
 895					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 896			set_cipher_mode(&desc[idx], hash_mode);
 897		}
 898	} else { /*Decrypt*/
 899		/* Get ICV out from hardware */
 900		hw_desc_init(&desc[idx]);
 901		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 902		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 903		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
 904			      ctx->authsize, NS_BIT, 1);
 905		set_queue_last_ind(ctx->drvdata, &desc[idx]);
 906		set_cipher_config0(&desc[idx],
 907				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 908		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 909		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 910			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
 911			set_aes_not_hash_mode(&desc[idx]);
 912		} else {
 913			set_cipher_mode(&desc[idx], hash_mode);
 914		}
 915	}
 916
 917	*seq_size = (++idx);
 918}
 919
 920static void cc_set_cipher_desc(struct aead_request *req,
 921			       struct cc_hw_desc desc[],
 922			       unsigned int *seq_size)
 923{
 924	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 925	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 926	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
 927	unsigned int hw_iv_size = req_ctx->hw_iv_size;
 928	unsigned int idx = *seq_size;
 929	int direct = req_ctx->gen_ctx.op_type;
 930
 931	/* Setup cipher state */
 932	hw_desc_init(&desc[idx]);
 933	set_cipher_config0(&desc[idx], direct);
 934	set_flow_mode(&desc[idx], ctx->flow_mode);
 935	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
 936		     hw_iv_size, NS_BIT);
 937	if (ctx->cipher_mode == DRV_CIPHER_CTR)
 938		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 939	else
 940		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 941	set_cipher_mode(&desc[idx], ctx->cipher_mode);
 942	idx++;
 943
 944	/* Setup enc. key */
 945	hw_desc_init(&desc[idx]);
 946	set_cipher_config0(&desc[idx], direct);
 947	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 948	set_flow_mode(&desc[idx], ctx->flow_mode);
 949	if (ctx->flow_mode == S_DIN_to_AES) {
 950		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
 951			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
 952			      ctx->enc_keylen), NS_BIT);
 953		set_key_size_aes(&desc[idx], ctx->enc_keylen);
 954	} else {
 955		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
 956			     ctx->enc_keylen, NS_BIT);
 957		set_key_size_des(&desc[idx], ctx->enc_keylen);
 958	}
 959	set_cipher_mode(&desc[idx], ctx->cipher_mode);
 960	idx++;
 961
 962	*seq_size = idx;
 963}
 964
 965static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
 966			   unsigned int *seq_size, unsigned int data_flow_mode)
 967{
 968	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
 969	int direct = req_ctx->gen_ctx.op_type;
 970	unsigned int idx = *seq_size;
 971
 972	if (req_ctx->cryptlen == 0)
 973		return; /*null processing*/
 974
 975	cc_set_cipher_desc(req, desc, &idx);
 976	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
 977	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
  978		/* We must wait for DMA to write all the cipher data */
 979		hw_desc_init(&desc[idx]);
 980		set_din_no_dma(&desc[idx], 0, 0xfffff0);
 981		set_dout_no_dma(&desc[idx], 0, 0, 1);
 982		idx++;
 983	}
 984
 985	*seq_size = idx;
 986}
 987
 988static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
 989			     unsigned int *seq_size)
 990{
 991	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 992	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 993	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 994				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 995	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 996				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
 997	unsigned int idx = *seq_size;
 998
 999	/* Loading hash ipad xor key state */
1000	hw_desc_init(&desc[idx]);
1001	set_cipher_mode(&desc[idx], hash_mode);
1002	set_din_type(&desc[idx], DMA_DLLI,
1003		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
1004		     NS_BIT);
1005	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1006	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1007	idx++;
1008
1009	/* Load init. digest len (64 bytes) */
1010	hw_desc_init(&desc[idx]);
1011	set_cipher_mode(&desc[idx], hash_mode);
1012	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1013		     ctx->hash_len);
1014	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1015	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1016	idx++;
1017
1018	*seq_size = idx;
1019}
1020
1021static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
1022			     unsigned int *seq_size)
1023{
1024	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1025	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1026	unsigned int idx = *seq_size;
1027
1028	/* Loading MAC state */
1029	hw_desc_init(&desc[idx]);
1030	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1031	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1032	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1033	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1034	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1035	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1036	set_aes_not_hash_mode(&desc[idx]);
1037	idx++;
1038
1039	/* Setup XCBC MAC K1 */
1040	hw_desc_init(&desc[idx]);
1041	set_din_type(&desc[idx], DMA_DLLI,
1042		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1043		     AES_KEYSIZE_128, NS_BIT);
1044	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1045	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1046	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1047	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1048	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1049	set_aes_not_hash_mode(&desc[idx]);
1050	idx++;
1051
1052	/* Setup XCBC MAC K2 */
1053	hw_desc_init(&desc[idx]);
1054	set_din_type(&desc[idx], DMA_DLLI,
1055		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1056		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1057	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1058	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1059	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1060	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1061	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1062	set_aes_not_hash_mode(&desc[idx]);
1063	idx++;
1064
1065	/* Setup XCBC MAC K3 */
1066	hw_desc_init(&desc[idx]);
1067	set_din_type(&desc[idx], DMA_DLLI,
1068		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1069		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1070	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
1071	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1072	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1073	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1074	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1075	set_aes_not_hash_mode(&desc[idx]);
1076	idx++;
1077
1078	*seq_size = idx;
1079}
1080
1081static void cc_proc_header_desc(struct aead_request *req,
1082				struct cc_hw_desc desc[],
1083				unsigned int *seq_size)
1084{
1085	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
1086	unsigned int idx = *seq_size;
1087
1088	/* Hash associated data */
1089	if (areq_ctx->assoclen > 0)
1090		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1091
1092	/* Hash IV */
1093	*seq_size = idx;
1094}
1095
1096static void cc_proc_scheme_desc(struct aead_request *req,
1097				struct cc_hw_desc desc[],
1098				unsigned int *seq_size)
1099{
1100	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1101	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1102	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
1103	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1104				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1105	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1106				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1107	unsigned int idx = *seq_size;
1108
1109	hw_desc_init(&desc[idx]);
1110	set_cipher_mode(&desc[idx], hash_mode);
1111	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1112		      ctx->hash_len);
1113	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1114	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
1115	set_cipher_do(&desc[idx], DO_PAD);
1116	idx++;
1117
1118	/* Get final ICV result */
1119	hw_desc_init(&desc[idx]);
1120	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1121		      digest_size);
1122	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1123	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1124	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1125	set_cipher_mode(&desc[idx], hash_mode);
1126	idx++;
1127
1128	/* Loading hash opad xor key state */
1129	hw_desc_init(&desc[idx]);
1130	set_cipher_mode(&desc[idx], hash_mode);
1131	set_din_type(&desc[idx], DMA_DLLI,
1132		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1133		     digest_size, NS_BIT);
1134	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1135	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1136	idx++;
1137
1138	/* Load init. digest len (64 bytes) */
1139	hw_desc_init(&desc[idx]);
1140	set_cipher_mode(&desc[idx], hash_mode);
1141	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1142		     ctx->hash_len);
1143	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1144	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1145	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1146	idx++;
1147
1148	/* Perform HASH update */
1149	hw_desc_init(&desc[idx]);
1150	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
1151		     digest_size);
1152	set_flow_mode(&desc[idx], DIN_HASH);
1153	idx++;
1154
1155	*seq_size = idx;
1156}
1157
1158static void cc_mlli_to_sram(struct aead_request *req,
1159			    struct cc_hw_desc desc[], unsigned int *seq_size)
1160{
1161	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1162	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1163	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1164	struct device *dev = drvdata_to_dev(ctx->drvdata);
1165
1166	if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1167	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
1168	    !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
1169		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1170			ctx->drvdata->mlli_sram_addr,
1171			req_ctx->mlli_params.mlli_len);
1172		/* Copy MLLI table host-to-sram */
1173		hw_desc_init(&desc[*seq_size]);
1174		set_din_type(&desc[*seq_size], DMA_DLLI,
1175			     req_ctx->mlli_params.mlli_dma_addr,
1176			     req_ctx->mlli_params.mlli_len, NS_BIT);
1177		set_dout_sram(&desc[*seq_size],
1178			      ctx->drvdata->mlli_sram_addr,
1179			      req_ctx->mlli_params.mlli_len);
1180		set_flow_mode(&desc[*seq_size], BYPASS);
1181		(*seq_size)++;
1182	}
1183}
1184
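/*
 * Select the engine data flow for the bulk data descriptors: single-pass
 * flows route the data through the cipher and hash engines together,
 * while double-pass flows run only the cipher here (authentication is
 * handled by separate descriptors).
 */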
1185static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
1186					  enum cc_flow_mode setup_flow_mode,
1187					  bool is_single_pass)
1188{
1189	enum cc_flow_mode data_flow_mode;
1190
1191	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1192		if (setup_flow_mode == S_DIN_to_AES)
1193			data_flow_mode = is_single_pass ?
1194				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1195		else
1196			data_flow_mode = is_single_pass ?
1197				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1198	} else { /* Decrypt */
1199		if (setup_flow_mode == S_DIN_to_AES)
1200			data_flow_mode = is_single_pass ?
1201				AES_and_HASH : DIN_AES_DOUT;
1202		else
1203			data_flow_mode = is_single_pass ?
1204				DES_and_HASH : DIN_DES_DOUT;
1205	}
1206
1207	return data_flow_mode;
1208}
1209
1210static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1211			    unsigned int *seq_size)
1212{
1213	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1214	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1215	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1216	int direct = req_ctx->gen_ctx.op_type;
1217	unsigned int data_flow_mode =
1218		cc_get_data_flow(direct, ctx->flow_mode,
1219				 req_ctx->is_single_pass);
1220
1221	if (req_ctx->is_single_pass) {
1222		/*
1223		 * Single-pass flow
1224		 */
1225		cc_set_hmac_desc(req, desc, seq_size);
1226		cc_set_cipher_desc(req, desc, seq_size);
1227		cc_proc_header_desc(req, desc, seq_size);
1228		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1229		cc_proc_scheme_desc(req, desc, seq_size);
1230		cc_proc_digest_desc(req, desc, seq_size);
1231		return;
1232	}
1233
1234	/*
1235	 * Double-pass flow
1236	 * Fallback for unsupported single-pass modes,
 1237	 * i.e. assoc. data whose length is not a multiple of a word
1238	 */
1239	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1240		/* encrypt first.. */
1241		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1242		/* authenc after..*/
1243		cc_set_hmac_desc(req, desc, seq_size);
1244		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1245		cc_proc_scheme_desc(req, desc, seq_size);
1246		cc_proc_digest_desc(req, desc, seq_size);
1247
1248	} else { /*DECRYPT*/
1249		/* authenc first..*/
1250		cc_set_hmac_desc(req, desc, seq_size);
1251		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1252		cc_proc_scheme_desc(req, desc, seq_size);
1253		/* decrypt after.. */
1254		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
 1255		/* read the digest result with the completion bit set;
 1256		 * this must be done after the cipher operation
 1257		 */
1258		cc_proc_digest_desc(req, desc, seq_size);
1259	}
1260}
1261
1262static void
1263cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1264		unsigned int *seq_size)
1265{
1266	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1267	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1268	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1269	int direct = req_ctx->gen_ctx.op_type;
1270	unsigned int data_flow_mode =
1271		cc_get_data_flow(direct, ctx->flow_mode,
1272				 req_ctx->is_single_pass);
1273
1274	if (req_ctx->is_single_pass) {
1275		/*
1276		 * Single-pass flow
1277		 */
1278		cc_set_xcbc_desc(req, desc, seq_size);
1279		cc_set_cipher_desc(req, desc, seq_size);
1280		cc_proc_header_desc(req, desc, seq_size);
1281		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1282		cc_proc_digest_desc(req, desc, seq_size);
1283		return;
1284	}
1285
1286	/*
1287	 * Double-pass flow
1288	 * Fallback for unsupported single-pass modes,
 1289	 * i.e. assoc. data whose length is not a multiple of a word
1290	 */
1291	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1292		/* encrypt first.. */
1293		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1294		/* authenc after.. */
1295		cc_set_xcbc_desc(req, desc, seq_size);
1296		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1297		cc_proc_digest_desc(req, desc, seq_size);
1298	} else { /*DECRYPT*/
1299		/* authenc first.. */
1300		cc_set_xcbc_desc(req, desc, seq_size);
1301		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1302		/* decrypt after..*/
1303		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
 1304		/* read the digest result with the completion bit set;
 1305		 * this must be done after the cipher operation
 1306		 */
1307		cc_proc_digest_desc(req, desc, seq_size);
1308	}
1309}
1310
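/*
 * Check cryptlen/assoclen against the constraints of the configured
 * cipher mode and decide whether the request can use the single-pass
 * flow; unaligned lengths fall back to the double-pass flow.
 */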
1311static int validate_data_size(struct cc_aead_ctx *ctx,
1312			      enum drv_crypto_direction direct,
1313			      struct aead_request *req)
1314{
1315	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
1316	struct device *dev = drvdata_to_dev(ctx->drvdata);
1317	unsigned int assoclen = areq_ctx->assoclen;
1318	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1319			(req->cryptlen - ctx->authsize) : req->cryptlen;
1320
1321	if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1322	    req->cryptlen < ctx->authsize)
1323		goto data_size_err;
1324
1325	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1326
1327	switch (ctx->flow_mode) {
1328	case S_DIN_to_AES:
1329		if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1330		    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1331			goto data_size_err;
1332		if (ctx->cipher_mode == DRV_CIPHER_CCM)
1333			break;
1334		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1335			if (areq_ctx->plaintext_authenticate_only)
1336				areq_ctx->is_single_pass = false;
1337			break;
1338		}
1339
1340		if (!IS_ALIGNED(assoclen, sizeof(u32)))
1341			areq_ctx->is_single_pass = false;
1342
1343		if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1344		    !IS_ALIGNED(cipherlen, sizeof(u32)))
1345			areq_ctx->is_single_pass = false;
1346
1347		break;
1348	case S_DIN_to_DES:
1349		if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1350			goto data_size_err;
1351		if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1352			areq_ctx->is_single_pass = false;
1353		break;
1354	default:
1355		dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1356		goto data_size_err;
1357	}
1358
1359	return 0;
1360
1361data_size_err:
1362	return -EINVAL;
1363}
1364
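/*
 * Encode the CCM associated-data length field (the start of the A0
 * block) per RFC 3610 / NIST SP 800-38C: 2 bytes for short headers,
 * 6 bytes (0xFF 0xFE prefix) otherwise; returns the number of bytes
 * written (0 if there is no header).
 */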
1365static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1366{
1367	unsigned int len = 0;
1368
1369	if (header_size == 0)
1370		return 0;
1371
1372	if (header_size < ((1UL << 16) - (1UL << 8))) {
1373		len = 2;
1374
1375		pa0_buff[0] = (header_size >> 8) & 0xFF;
1376		pa0_buff[1] = header_size & 0xFF;
1377	} else {
1378		len = 6;
1379
1380		pa0_buff[0] = 0xFF;
1381		pa0_buff[1] = 0xFE;
1382		pa0_buff[2] = (header_size >> 24) & 0xFF;
1383		pa0_buff[3] = (header_size >> 16) & 0xFF;
1384		pa0_buff[4] = (header_size >> 8) & 0xFF;
1385		pa0_buff[5] = header_size & 0xFF;
1386	}
1387
1388	return len;
1389}
1390
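/*
 * Zero the csize-byte length field and write msglen big-endian into its
 * trailing bytes; returns -EOVERFLOW if msglen does not fit in csize bytes.
 */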
1391static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1392{
1393	__be32 data;
1394
1395	memset(block, 0, csize);
1396	block += csize;
1397
1398	if (csize >= 4)
1399		csize = 4;
1400	else if (msglen > (1 << (8 * csize)))
1401		return -EOVERFLOW;
1402
1403	data = cpu_to_be32(msglen);
1404	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1405
1406	return 0;
1407}
1408
1409static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1410		  unsigned int *seq_size)
1411{
1412	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1413	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1414	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1415	unsigned int idx = *seq_size;
1416	unsigned int cipher_flow_mode;
1417	dma_addr_t mac_result;
1418
1419	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1420		cipher_flow_mode = AES_to_HASH_and_DOUT;
1421		mac_result = req_ctx->mac_buf_dma_addr;
1422	} else { /* Encrypt */
1423		cipher_flow_mode = AES_and_HASH;
1424		mac_result = req_ctx->icv_dma_addr;
1425	}
1426
1427	/* load key */
1428	hw_desc_init(&desc[idx]);
1429	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1430	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1431		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1432		      ctx->enc_keylen), NS_BIT);
1433	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1434	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1435	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1436	set_flow_mode(&desc[idx], S_DIN_to_AES);
1437	idx++;
1438
1439	/* load ctr state */
1440	hw_desc_init(&desc[idx]);
1441	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1442	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1443	set_din_type(&desc[idx], DMA_DLLI,
1444		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1445	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1446	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1447	set_flow_mode(&desc[idx], S_DIN_to_AES);
1448	idx++;
1449
1450	/* load MAC key */
1451	hw_desc_init(&desc[idx]);
1452	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1453	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1454		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1455		      ctx->enc_keylen), NS_BIT);
1456	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1457	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1458	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1459	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1460	set_aes_not_hash_mode(&desc[idx]);
1461	idx++;
1462
1463	/* load MAC state */
1464	hw_desc_init(&desc[idx]);
1465	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1466	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1467	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1468		     AES_BLOCK_SIZE, NS_BIT);
1469	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1470	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1471	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1472	set_aes_not_hash_mode(&desc[idx]);
1473	idx++;
1474
1475	/* process assoc data */
1476	if (req_ctx->assoclen > 0) {
1477		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1478	} else {
1479		hw_desc_init(&desc[idx]);
1480		set_din_type(&desc[idx], DMA_DLLI,
1481			     sg_dma_address(&req_ctx->ccm_adata_sg),
1482			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1483		set_flow_mode(&desc[idx], DIN_HASH);
1484		idx++;
1485	}
1486
1487	/* process the cipher */
1488	if (req_ctx->cryptlen)
1489		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1490
1491	/* Read temporal MAC */
1492	hw_desc_init(&desc[idx]);
1493	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1494	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1495		      NS_BIT, 0);
1496	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1497	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1498	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1499	set_aes_not_hash_mode(&desc[idx]);
1500	idx++;
1501
1502	/* load AES-CTR state (for last MAC calculation)*/
1503	hw_desc_init(&desc[idx]);
1504	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1505	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1506	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1507		     AES_BLOCK_SIZE, NS_BIT);
1508	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1509	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1510	set_flow_mode(&desc[idx], S_DIN_to_AES);
1511	idx++;
1512
1513	hw_desc_init(&desc[idx]);
1514	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1515	set_dout_no_dma(&desc[idx], 0, 0, 1);
1516	idx++;
1517
1518	/* encrypt the "T" value and store MAC in mac_state */
1519	hw_desc_init(&desc[idx]);
1520	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1521		     ctx->authsize, NS_BIT);
1522	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1523	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1524	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1525	idx++;
1526
1527	*seq_size = idx;
1528	return 0;
1529}
1530
1531static int config_ccm_adata(struct aead_request *req)
1532{
1533	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1534	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1535	struct device *dev = drvdata_to_dev(ctx->drvdata);
1536	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1537	//unsigned int size_of_a = 0, rem_a_size = 0;
1538	unsigned int lp = req->iv[0];
 1539	/* Note: The code assumes that req->iv[0] already contains the value
 1540	 * of L' of RFC 3610.
 1541	 */
 1542	unsigned int l = lp + 1;  /* This is L of RFC 3610. */
 1543	unsigned int m = ctx->authsize;  /* This is M of RFC 3610. */
1544	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1545	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1546	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1547	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1548				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1549				req->cryptlen :
1550				(req->cryptlen - ctx->authsize);
1551	int rc;
1552
1553	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1554	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1555
1556	/* taken from crypto/ccm.c */
1557	/* 2 <= L <= 8, so 1 <= L' <= 7. */
1558	if (l < 2 || l > 8) {
1559		dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
1560		return -EINVAL;
1561	}
1562	memcpy(b0, req->iv, AES_BLOCK_SIZE);
1563
1564	/* format control info per RFC 3610 and
1565	 * NIST Special Publication 800-38C
1566	 */
1567	*b0 |= (8 * ((m - 2) / 2));
1568	if (req_ctx->assoclen > 0)
1569		*b0 |= 64;  /* Enable bit 6 if Adata exists. */
1570
 1571	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write the message length. */
1572	if (rc) {
1573		dev_err(dev, "message len overflow detected");
1574		return rc;
1575	}
1576	 /* END of "taken from crypto/ccm.c" */
1577
1578	/* l(a) - size of associated data. */
1579	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
1580
1581	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1582	req->iv[15] = 1;
1583
1584	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1585	ctr_count_0[15] = 0;
1586
1587	return 0;
1588}
1589
1590static void cc_proc_rfc4309_ccm(struct aead_request *req)
1591{
1592	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1593	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1594	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
1595
1596	/* L' */
1597	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1598	/* For RFC 4309, always use 4 bytes for message length
1599	 * (at most 2^32-1 bytes).
1600	 */
1601	areq_ctx->ctr_iv[0] = 3;
1602
 1603	/* In RFC 4309 there is an 11-byte nonce+IV part,
1604	 * that we build here.
1605	 */
1606	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1607	       CCM_BLOCK_NONCE_SIZE);
1608	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1609	       CCM_BLOCK_IV_SIZE);
1610	req->iv = areq_ctx->ctr_iv;
1611}
1612
1613static void cc_set_ghash_desc(struct aead_request *req,
1614			      struct cc_hw_desc desc[], unsigned int *seq_size)
1615{
1616	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1617	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1618	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1619	unsigned int idx = *seq_size;
1620
1621	/* load key to AES*/
1622	hw_desc_init(&desc[idx]);
1623	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1624	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1625	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1626		     ctx->enc_keylen, NS_BIT);
1627	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1628	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1629	set_flow_mode(&desc[idx], S_DIN_to_AES);
1630	idx++;
1631
1632	/* process one zero block to generate hkey */
1633	hw_desc_init(&desc[idx]);
1634	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1635	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1636		      NS_BIT, 0);
1637	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1638	idx++;
1639
1640	/* Memory Barrier */
1641	hw_desc_init(&desc[idx]);
1642	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1643	set_dout_no_dma(&desc[idx], 0, 0, 1);
1644	idx++;
1645
1646	/* Load GHASH subkey */
1647	hw_desc_init(&desc[idx]);
1648	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1649		     AES_BLOCK_SIZE, NS_BIT);
1650	set_dout_no_dma(&desc[idx], 0, 0, 1);
1651	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1652	set_aes_not_hash_mode(&desc[idx]);
1653	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1654	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1655	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1656	idx++;
1657
 1658	/* Configure the hash engine to work with GHASH.
 1659	 * Since it was not possible to extend the HASH submodes to add GHASH,
 1660	 * the following command is necessary in order to
 1661	 * select GHASH (according to the HW designers).
 1662	 */
1663	hw_desc_init(&desc[idx]);
1664	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1665	set_dout_no_dma(&desc[idx], 0, 0, 1);
1666	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1667	set_aes_not_hash_mode(&desc[idx]);
1668	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1669	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1670	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1671	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1672	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1673	idx++;
1674
1675	/* Load the GHASH initial STATE (which is 0); for any hash there is an
1676	 * initial state.
1677	 */
1678	hw_desc_init(&desc[idx]);
1679	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1680	set_dout_no_dma(&desc[idx], 0, 0, 1);
1681	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1682	set_aes_not_hash_mode(&desc[idx]);
1683	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1684	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1685	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1686	idx++;
1687
1688	*seq_size = idx;
1689}
1690
1691static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1692			     unsigned int *seq_size)
1693{
1694	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1695	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1696	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1697	unsigned int idx = *seq_size;
1698
1699	/* Load the key into the AES engine */
1700	hw_desc_init(&desc[idx]);
1701	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1702	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1703	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1704		     ctx->enc_keylen, NS_BIT);
1705	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1706	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1707	set_flow_mode(&desc[idx], S_DIN_to_AES);
1708	idx++;
1709
1710	if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1711		/* Load the initial AES/CTR counter value, incremented by 2 */
1712		hw_desc_init(&desc[idx]);
1713		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1714		set_key_size_aes(&desc[idx], ctx->enc_keylen);
1715		set_din_type(&desc[idx], DMA_DLLI,
1716			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1717			     NS_BIT);
1718		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1719		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1720		set_flow_mode(&desc[idx], S_DIN_to_AES);
1721		idx++;
1722	}
1723
1724	*seq_size = idx;
1725}
1726
1727static void cc_proc_gcm_result(struct aead_request *req,
1728			       struct cc_hw_desc desc[],
1729			       unsigned int *seq_size)
1730{
1731	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1732	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1733	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1734	dma_addr_t mac_result;
1735	unsigned int idx = *seq_size;
1736
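	/* On decrypt the computed tag goes to mac_buf and is compared with the
	 * received ICV at completion; on encrypt it is written directly to the
	 * ICV location in the destination.
	 */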
1737	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1738		mac_result = req_ctx->mac_buf_dma_addr;
1739	} else { /* Encrypt */
1740		mac_result = req_ctx->icv_dma_addr;
1741	}
1742
1743	/* process(ghash) gcm_block_len */
1744	hw_desc_init(&desc[idx]);
1745	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1746		     AES_BLOCK_SIZE, NS_BIT);
1747	set_flow_mode(&desc[idx], DIN_HASH);
1748	idx++;
1749
1750	/* Store GHASH state after GHASH(Associated Data + Ciphertext + LenBlock) */
1751	hw_desc_init(&desc[idx]);
1752	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1753	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1754	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1755		      NS_BIT, 0);
1756	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1757	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1758	set_aes_not_hash_mode(&desc[idx]);
1759
1760	idx++;
1761
1762	/* Load the initial AES/CTR counter value, incremented by 1 */
1763	hw_desc_init(&desc[idx]);
1764	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1765	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1766	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1767		     AES_BLOCK_SIZE, NS_BIT);
1768	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1769	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1770	set_flow_mode(&desc[idx], S_DIN_to_AES);
1771	idx++;
1772
1773	/* Memory Barrier */
1774	hw_desc_init(&desc[idx]);
1775	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1776	set_dout_no_dma(&desc[idx], 0, 0, 1);
1777	idx++;
1778
1779	/* Process GCTR on the stored GHASH and store the MAC in mac_state */
1780	hw_desc_init(&desc[idx]);
1781	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1782	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1783		     AES_BLOCK_SIZE, NS_BIT);
1784	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1785	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1786	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1787	idx++;
1788
1789	*seq_size = idx;
1790}
1791
1792static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1793		  unsigned int *seq_size)
1794{
1795	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1796	unsigned int cipher_flow_mode;
1797
1798	// In RFC 4543 there is no data to encrypt; just copy data from src to dest.
1799	if (req_ctx->plaintext_authenticate_only) {
1800		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1801		cc_set_ghash_desc(req, desc, seq_size);
1802		/* process(ghash) assoc data */
1803		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1804		cc_set_gctr_desc(req, desc, seq_size);
1805		cc_proc_gcm_result(req, desc, seq_size);
1806		return 0;
1807	}
1808
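	/* GHASH is computed over the ciphertext: on decrypt the input is fed to
	 * both AES and HASH, while on encrypt the AES output is forwarded to
	 * both HASH and DOUT.
	 */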
1809	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1810		cipher_flow_mode = AES_and_HASH;
1811	} else { /* Encrypt */
1812		cipher_flow_mode = AES_to_HASH_and_DOUT;
1813	}
1814
1815	// For GCM and RFC 4106.
1816	cc_set_ghash_desc(req, desc, seq_size);
1817	/* process(ghash) assoc data */
1818	if (req_ctx->assoclen > 0)
1819		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1820	cc_set_gctr_desc(req, desc, seq_size);
1821	/* process(gctr+ghash) */
1822	if (req_ctx->cryptlen)
1823		cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1824	cc_proc_gcm_result(req, desc, seq_size);
1825
1826	return 0;
1827}
1828
1829static int config_gcm_context(struct aead_request *req)
1830{
1831	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1832	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1833	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1834	struct device *dev = drvdata_to_dev(ctx->drvdata);
1835
1836	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1837				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1838				req->cryptlen :
1839				(req->cryptlen - ctx->authsize);
1840	__be32 counter = cpu_to_be32(2);
1841
1842	dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1843		__func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1844
1845	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1846
1847	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1848
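	/* With a 96-bit IV, counter value 2 (gcm_iv_inc2) is the first counter
	 * block used for the payload, while counter value 1 is J0 (gcm_iv_inc1),
	 * used to encrypt the final GHASH into the tag.
	 */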
1849	memcpy(req->iv + 12, &counter, 4);
1850	memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1851
1852	counter = cpu_to_be32(1);
1853	memcpy(req->iv + 12, &counter, 4);
1854	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1855
1856	if (!req_ctx->plaintext_authenticate_only) {
1857		__be64 temp64;
1858
1859		temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1860		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1861		temp64 = cpu_to_be64(cryptlen * 8);
1862		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1863	} else {
1864		/* rfc4543 => all data (AAD, IV, plaintext) is considered additional
1865		 * authenticated data, i.e. nothing is encrypted.
1866		 */
1867		__be64 temp64;
1868
1869		temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8);
1870		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1871		temp64 = 0;
1872		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1873	}
1874
1875	return 0;
1876}
1877
1878static void cc_proc_rfc4_gcm(struct aead_request *req)
1879{
1880	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1881	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1882	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
1883
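	/* Per RFC 4106/4543 the GCM IV is the 4-byte salt taken from the key
	 * followed by the 8-byte per-request IV.
	 */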
1884	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1885	       ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1886	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1887	       GCM_BLOCK_RFC4_IV_SIZE);
1888	req->iv = areq_ctx->ctr_iv;
1889}
1890
1891static int cc_proc_aead(struct aead_request *req,
1892			enum drv_crypto_direction direct)
1893{
1894	int rc = 0;
1895	int seq_len = 0;
1896	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1897	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1898	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1899	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
1900	struct device *dev = drvdata_to_dev(ctx->drvdata);
1901	struct cc_crypto_req cc_req = {};
1902
1903	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1904		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1905		ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1906		sg_virt(req->dst), req->dst->offset, req->cryptlen);
1907
1908	/* STAT_PHASE_0: Init and sanity checks */
1909
1910	/* Check data length according to mode */
1911	if (validate_data_size(ctx, direct, req)) {
1912		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1913			req->cryptlen, areq_ctx->assoclen);
1914		return -EINVAL;
1915	}
1916
1917	/* Setup request structure */
1918	cc_req.user_cb = cc_aead_complete;
1919	cc_req.user_arg = req;
1920
1921	/* Setup request context */
1922	areq_ctx->gen_ctx.op_type = direct;
1923	areq_ctx->req_authsize = ctx->authsize;
1924	areq_ctx->cipher_mode = ctx->cipher_mode;
1925
1926	/* STAT_PHASE_1: Map buffers */
1927
1928	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1929		/* Build CTR IV - Copy nonce from last 4 bytes in
1930		 * CTR key to first 4 bytes in CTR IV
1931		 */
1932		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1933		       CTR_RFC3686_NONCE_SIZE);
1934		memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1935		       CTR_RFC3686_IV_SIZE);
1936		/* Initialize counter portion of counter block */
1937		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1938			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1939
1940		/* Replace with counter iv */
1941		req->iv = areq_ctx->ctr_iv;
1942		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1943	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1944		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1945		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1946		if (areq_ctx->ctr_iv != req->iv) {
1947			memcpy(areq_ctx->ctr_iv, req->iv,
1948			       crypto_aead_ivsize(tfm));
1949			req->iv = areq_ctx->ctr_iv;
1950		}
1951	}  else {
1952		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1953	}
1954
1955	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1956		rc = config_ccm_adata(req);
1957		if (rc) {
1958			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
1959				rc);
1960			goto exit;
1961		}
1962	} else {
1963		areq_ctx->ccm_hdr_size = ccm_header_size_null;
1964	}
1965
1966	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1967		rc = config_gcm_context(req);
1968		if (rc) {
1969			dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
1970				rc);
1971			goto exit;
1972		}
1973	}
1974
1975	rc = cc_map_aead_request(ctx->drvdata, req);
1976	if (rc) {
1977		dev_err(dev, "map_request() failed\n");
1978		goto exit;
1979	}
1980
1981	/* STAT_PHASE_2: Create sequence */
1982
1983	/* Load MLLI tables to SRAM if necessary */
1984	cc_mlli_to_sram(req, desc, &seq_len);
1985
1986	switch (ctx->auth_mode) {
1987	case DRV_HASH_SHA1:
1988	case DRV_HASH_SHA256:
1989		cc_hmac_authenc(req, desc, &seq_len);
1990		break;
1991	case DRV_HASH_XCBC_MAC:
1992		cc_xcbc_authenc(req, desc, &seq_len);
1993		break;
1994	case DRV_HASH_NULL:
1995		if (ctx->cipher_mode == DRV_CIPHER_CCM)
1996			cc_ccm(req, desc, &seq_len);
1997		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
1998			cc_gcm(req, desc, &seq_len);
1999		break;
2000	default:
2001		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2002		cc_unmap_aead_request(dev, req);
2003		rc = -ENOTSUPP;
2004		goto exit;
2005	}
2006
2007	/* STAT_PHASE_3: Lock HW and push sequence */
2008
2009	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2010
2011	if (rc != -EINPROGRESS && rc != -EBUSY) {
2012		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2013		cc_unmap_aead_request(dev, req);
2014	}
2015
2016exit:
2017	return rc;
2018}
2019
2020static int cc_aead_encrypt(struct aead_request *req)
2021{
2022	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2023	int rc;
2024
2025	memset(areq_ctx, 0, sizeof(*areq_ctx));
2026
2027	/* No generated IV required */
2028	areq_ctx->backup_iv = req->iv;
2029	areq_ctx->assoclen = req->assoclen;
2030
2031	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2032	if (rc != -EINPROGRESS && rc != -EBUSY)
2033		req->iv = areq_ctx->backup_iv;
2034
2035	return rc;
2036}
2037
2038static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2039{
2040	/* Very similar to cc_aead_encrypt() above. */
2041
2042	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2043	int rc;
2044
2045	rc = crypto_ipsec_check_assoclen(req->assoclen);
2046	if (rc)
2047		goto out;
2048
2049	memset(areq_ctx, 0, sizeof(*areq_ctx));
2050
2051	/* No generated IV required */
2052	areq_ctx->backup_iv = req->iv;
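	/* The last CCM_BLOCK_IV_SIZE bytes of the associated data are the
	 * rfc4309 IV; they are not part of the AAD proper, so exclude them.
	 */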
2053	areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
 
2054
2055	cc_proc_rfc4309_ccm(req);
2056
2057	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2058	if (rc != -EINPROGRESS && rc != -EBUSY)
2059		req->iv = areq_ctx->backup_iv;
2060out:
2061	return rc;
2062}
2063
2064static int cc_aead_decrypt(struct aead_request *req)
2065{
2066	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2067	int rc;
2068
2069	memset(areq_ctx, 0, sizeof(*areq_ctx));
2070
2071	/* No generated IV required */
2072	areq_ctx->backup_iv = req->iv;
2073	areq_ctx->assoclen = req->assoclen;
2074
2075	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2076	if (rc != -EINPROGRESS && rc != -EBUSY)
2077		req->iv = areq_ctx->backup_iv;
2078
2079	return rc;
2080}
2081
2082static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2083{
2084	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2085	int rc;
2086
2087	rc = crypto_ipsec_check_assoclen(req->assoclen);
2088	if (rc)
2089		goto out;
2090
2091	memset(areq_ctx, 0, sizeof(*areq_ctx));
2092
2093	/* No generated IV required */
2094	areq_ctx->backup_iv = req->iv;
2095	areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
2096
 
2097	cc_proc_rfc4309_ccm(req);
2098
2099	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2100	if (rc != -EINPROGRESS && rc != -EBUSY)
2101		req->iv = areq_ctx->backup_iv;
2102
2103out:
2104	return rc;
2105}
2106
2107static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2108				 unsigned int keylen)
2109{
2110	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2111	struct device *dev = drvdata_to_dev(ctx->drvdata);
2112
2113	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2114
2115	if (keylen < 4)
2116		return -EINVAL;
2117
2118	keylen -= 4;
2119	memcpy(ctx->ctr_nonce, key + keylen, 4);
2120
2121	return cc_aead_setkey(tfm, key, keylen);
2122}
2123
2124static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2125				 unsigned int keylen)
2126{
2127	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2128	struct device *dev = drvdata_to_dev(ctx->drvdata);
2129
2130	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2131
2132	if (keylen < 4)
2133		return -EINVAL;
2134
2135	keylen -= 4;
2136	memcpy(ctx->ctr_nonce, key + keylen, 4);
2137
2138	return cc_aead_setkey(tfm, key, keylen);
2139}
2140
2141static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2142			      unsigned int authsize)
2143{
2144	switch (authsize) {
2145	case 4:
2146	case 8:
2147	case 12:
2148	case 13:
2149	case 14:
2150	case 15:
2151	case 16:
2152		break;
2153	default:
2154		return -EINVAL;
2155	}
2156
2157	return cc_aead_setauthsize(authenc, authsize);
2158}
2159
2160static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2161				      unsigned int authsize)
2162{
2163	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2164	struct device *dev = drvdata_to_dev(ctx->drvdata);
2165
2166	dev_dbg(dev, "authsize %d\n", authsize);
2167
2168	switch (authsize) {
2169	case 8:
2170	case 12:
2171	case 16:
2172		break;
2173	default:
2174		return -EINVAL;
2175	}
2176
2177	return cc_aead_setauthsize(authenc, authsize);
2178}
2179
2180static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2181				      unsigned int authsize)
2182{
2183	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2184	struct device *dev = drvdata_to_dev(ctx->drvdata);
2185
2186	dev_dbg(dev, "authsize %d\n", authsize);
2187
2188	if (authsize != 16)
2189		return -EINVAL;
2190
2191	return cc_aead_setauthsize(authenc, authsize);
2192}
2193
2194static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2195{
2196	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2197	int rc;
2198
2199	rc = crypto_ipsec_check_assoclen(req->assoclen);
2200	if (rc)
2201		goto out;
2202
2203	memset(areq_ctx, 0, sizeof(*areq_ctx));
2204
2205	/* No generated IV required */
2206	areq_ctx->backup_iv = req->iv;
2207	areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
2208
2209	cc_proc_rfc4_gcm(req);
 
2210
2211	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2212	if (rc != -EINPROGRESS && rc != -EBUSY)
2213		req->iv = areq_ctx->backup_iv;
2214out:
2215	return rc;
2216}
2217
2218static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2219{
2220	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2221	int rc;
2222
2223	rc = crypto_ipsec_check_assoclen(req->assoclen);
2224	if (rc)
2225		goto out;
2226
2227	memset(areq_ctx, 0, sizeof(*areq_ctx));
 
2228
2229	// Plaintext is not encrypted with rfc4543
2230	areq_ctx->plaintext_authenticate_only = true;
2231
2232	/* No generated IV required */
2233	areq_ctx->backup_iv = req->iv;
2234	areq_ctx->assoclen = req->assoclen;
2235
2236	cc_proc_rfc4_gcm(req);
 
2237
2238	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2239	if (rc != -EINPROGRESS && rc != -EBUSY)
2240		req->iv = areq_ctx->backup_iv;
2241out:
2242	return rc;
2243}
2244
2245static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2246{
2247	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2248	int rc;
2249
2250	rc = crypto_ipsec_check_assoclen(req->assoclen);
2251	if (rc)
2252		goto out;
2253
2254	memset(areq_ctx, 0, sizeof(*areq_ctx));
2255
2256	/* No generated IV required */
2257	areq_ctx->backup_iv = req->iv;
2258	areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
2259
2260	cc_proc_rfc4_gcm(req);
 
2261
2262	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2263	if (rc != -EINPROGRESS && rc != -EBUSY)
2264		req->iv = areq_ctx->backup_iv;
2265out:
2266	return rc;
2267}
2268
2269static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2270{
2271	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2272	int rc;
2273
2274	rc = crypto_ipsec_check_assoclen(req->assoclen);
2275	if (rc)
2276		goto out;
2277
2278	memset(areq_ctx, 0, sizeof(*areq_ctx));
2279
2280	// Ciphertext is not decrypted with rfc4543
2281	areq_ctx->plaintext_authenticate_only = true;
2282
2283	/* No generated IV required */
2284	areq_ctx->backup_iv = req->iv;
2285	areq_ctx->assoclen = req->assoclen;
2286
2287	cc_proc_rfc4_gcm(req);
 
2288
2289	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2290	if (rc != -EINPROGRESS && rc != -EBUSY)
2291		req->iv = areq_ctx->backup_iv;
2292out:
2293	return rc;
2294}
2295
2296/* aead alg */
2297static struct cc_alg_template aead_algs[] = {
2298	{
2299		.name = "authenc(hmac(sha1),cbc(aes))",
2300		.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2301		.blocksize = AES_BLOCK_SIZE,
 
2302		.template_aead = {
2303			.setkey = cc_aead_setkey,
2304			.setauthsize = cc_aead_setauthsize,
2305			.encrypt = cc_aead_encrypt,
2306			.decrypt = cc_aead_decrypt,
2307			.init = cc_aead_init,
2308			.exit = cc_aead_exit,
2309			.ivsize = AES_BLOCK_SIZE,
2310			.maxauthsize = SHA1_DIGEST_SIZE,
2311		},
2312		.cipher_mode = DRV_CIPHER_CBC,
2313		.flow_mode = S_DIN_to_AES,
2314		.auth_mode = DRV_HASH_SHA1,
2315		.min_hw_rev = CC_HW_REV_630,
2316		.std_body = CC_STD_NIST,
2317	},
2318	{
2319		.name = "authenc(hmac(sha1),cbc(des3_ede))",
2320		.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2321		.blocksize = DES3_EDE_BLOCK_SIZE,
 
2322		.template_aead = {
2323			.setkey = cc_des3_aead_setkey,
2324			.setauthsize = cc_aead_setauthsize,
2325			.encrypt = cc_aead_encrypt,
2326			.decrypt = cc_aead_decrypt,
2327			.init = cc_aead_init,
2328			.exit = cc_aead_exit,
2329			.ivsize = DES3_EDE_BLOCK_SIZE,
2330			.maxauthsize = SHA1_DIGEST_SIZE,
2331		},
2332		.cipher_mode = DRV_CIPHER_CBC,
2333		.flow_mode = S_DIN_to_DES,
2334		.auth_mode = DRV_HASH_SHA1,
2335		.min_hw_rev = CC_HW_REV_630,
2336		.std_body = CC_STD_NIST,
2337	},
2338	{
2339		.name = "authenc(hmac(sha256),cbc(aes))",
2340		.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2341		.blocksize = AES_BLOCK_SIZE,
 
2342		.template_aead = {
2343			.setkey = cc_aead_setkey,
2344			.setauthsize = cc_aead_setauthsize,
2345			.encrypt = cc_aead_encrypt,
2346			.decrypt = cc_aead_decrypt,
2347			.init = cc_aead_init,
2348			.exit = cc_aead_exit,
2349			.ivsize = AES_BLOCK_SIZE,
2350			.maxauthsize = SHA256_DIGEST_SIZE,
2351		},
2352		.cipher_mode = DRV_CIPHER_CBC,
2353		.flow_mode = S_DIN_to_AES,
2354		.auth_mode = DRV_HASH_SHA256,
2355		.min_hw_rev = CC_HW_REV_630,
2356		.std_body = CC_STD_NIST,
2357	},
2358	{
2359		.name = "authenc(hmac(sha256),cbc(des3_ede))",
2360		.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2361		.blocksize = DES3_EDE_BLOCK_SIZE,
 
2362		.template_aead = {
2363			.setkey = cc_des3_aead_setkey,
2364			.setauthsize = cc_aead_setauthsize,
2365			.encrypt = cc_aead_encrypt,
2366			.decrypt = cc_aead_decrypt,
2367			.init = cc_aead_init,
2368			.exit = cc_aead_exit,
2369			.ivsize = DES3_EDE_BLOCK_SIZE,
2370			.maxauthsize = SHA256_DIGEST_SIZE,
2371		},
2372		.cipher_mode = DRV_CIPHER_CBC,
2373		.flow_mode = S_DIN_to_DES,
2374		.auth_mode = DRV_HASH_SHA256,
2375		.min_hw_rev = CC_HW_REV_630,
2376		.std_body = CC_STD_NIST,
2377	},
2378	{
2379		.name = "authenc(xcbc(aes),cbc(aes))",
2380		.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2381		.blocksize = AES_BLOCK_SIZE,
 
2382		.template_aead = {
2383			.setkey = cc_aead_setkey,
2384			.setauthsize = cc_aead_setauthsize,
2385			.encrypt = cc_aead_encrypt,
2386			.decrypt = cc_aead_decrypt,
2387			.init = cc_aead_init,
2388			.exit = cc_aead_exit,
2389			.ivsize = AES_BLOCK_SIZE,
2390			.maxauthsize = AES_BLOCK_SIZE,
2391		},
2392		.cipher_mode = DRV_CIPHER_CBC,
2393		.flow_mode = S_DIN_to_AES,
2394		.auth_mode = DRV_HASH_XCBC_MAC,
2395		.min_hw_rev = CC_HW_REV_630,
2396		.std_body = CC_STD_NIST,
2397	},
2398	{
2399		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2400		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2401		.blocksize = 1,
 
2402		.template_aead = {
2403			.setkey = cc_aead_setkey,
2404			.setauthsize = cc_aead_setauthsize,
2405			.encrypt = cc_aead_encrypt,
2406			.decrypt = cc_aead_decrypt,
2407			.init = cc_aead_init,
2408			.exit = cc_aead_exit,
2409			.ivsize = CTR_RFC3686_IV_SIZE,
2410			.maxauthsize = SHA1_DIGEST_SIZE,
2411		},
2412		.cipher_mode = DRV_CIPHER_CTR,
2413		.flow_mode = S_DIN_to_AES,
2414		.auth_mode = DRV_HASH_SHA1,
2415		.min_hw_rev = CC_HW_REV_630,
2416		.std_body = CC_STD_NIST,
2417	},
2418	{
2419		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2420		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2421		.blocksize = 1,
 
2422		.template_aead = {
2423			.setkey = cc_aead_setkey,
2424			.setauthsize = cc_aead_setauthsize,
2425			.encrypt = cc_aead_encrypt,
2426			.decrypt = cc_aead_decrypt,
2427			.init = cc_aead_init,
2428			.exit = cc_aead_exit,
2429			.ivsize = CTR_RFC3686_IV_SIZE,
2430			.maxauthsize = SHA256_DIGEST_SIZE,
2431		},
2432		.cipher_mode = DRV_CIPHER_CTR,
2433		.flow_mode = S_DIN_to_AES,
2434		.auth_mode = DRV_HASH_SHA256,
2435		.min_hw_rev = CC_HW_REV_630,
2436		.std_body = CC_STD_NIST,
2437	},
2438	{
2439		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2440		.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2441		.blocksize = 1,
 
2442		.template_aead = {
2443			.setkey = cc_aead_setkey,
2444			.setauthsize = cc_aead_setauthsize,
2445			.encrypt = cc_aead_encrypt,
2446			.decrypt = cc_aead_decrypt,
2447			.init = cc_aead_init,
2448			.exit = cc_aead_exit,
2449			.ivsize = CTR_RFC3686_IV_SIZE,
2450			.maxauthsize = AES_BLOCK_SIZE,
2451		},
2452		.cipher_mode = DRV_CIPHER_CTR,
2453		.flow_mode = S_DIN_to_AES,
2454		.auth_mode = DRV_HASH_XCBC_MAC,
2455		.min_hw_rev = CC_HW_REV_630,
2456		.std_body = CC_STD_NIST,
2457	},
2458	{
2459		.name = "ccm(aes)",
2460		.driver_name = "ccm-aes-ccree",
2461		.blocksize = 1,
 
2462		.template_aead = {
2463			.setkey = cc_aead_setkey,
2464			.setauthsize = cc_ccm_setauthsize,
2465			.encrypt = cc_aead_encrypt,
2466			.decrypt = cc_aead_decrypt,
2467			.init = cc_aead_init,
2468			.exit = cc_aead_exit,
2469			.ivsize = AES_BLOCK_SIZE,
2470			.maxauthsize = AES_BLOCK_SIZE,
2471		},
2472		.cipher_mode = DRV_CIPHER_CCM,
2473		.flow_mode = S_DIN_to_AES,
2474		.auth_mode = DRV_HASH_NULL,
2475		.min_hw_rev = CC_HW_REV_630,
2476		.std_body = CC_STD_NIST,
2477	},
2478	{
2479		.name = "rfc4309(ccm(aes))",
2480		.driver_name = "rfc4309-ccm-aes-ccree",
2481		.blocksize = 1,
 
2482		.template_aead = {
2483			.setkey = cc_rfc4309_ccm_setkey,
2484			.setauthsize = cc_rfc4309_ccm_setauthsize,
2485			.encrypt = cc_rfc4309_ccm_encrypt,
2486			.decrypt = cc_rfc4309_ccm_decrypt,
2487			.init = cc_aead_init,
2488			.exit = cc_aead_exit,
2489			.ivsize = CCM_BLOCK_IV_SIZE,
2490			.maxauthsize = AES_BLOCK_SIZE,
2491		},
2492		.cipher_mode = DRV_CIPHER_CCM,
2493		.flow_mode = S_DIN_to_AES,
2494		.auth_mode = DRV_HASH_NULL,
2495		.min_hw_rev = CC_HW_REV_630,
2496		.std_body = CC_STD_NIST,
2497	},
2498	{
2499		.name = "gcm(aes)",
2500		.driver_name = "gcm-aes-ccree",
2501		.blocksize = 1,
 
2502		.template_aead = {
2503			.setkey = cc_aead_setkey,
2504			.setauthsize = cc_gcm_setauthsize,
2505			.encrypt = cc_aead_encrypt,
2506			.decrypt = cc_aead_decrypt,
2507			.init = cc_aead_init,
2508			.exit = cc_aead_exit,
2509			.ivsize = 12,
2510			.maxauthsize = AES_BLOCK_SIZE,
2511		},
2512		.cipher_mode = DRV_CIPHER_GCTR,
2513		.flow_mode = S_DIN_to_AES,
2514		.auth_mode = DRV_HASH_NULL,
2515		.min_hw_rev = CC_HW_REV_630,
2516		.std_body = CC_STD_NIST,
2517	},
2518	{
2519		.name = "rfc4106(gcm(aes))",
2520		.driver_name = "rfc4106-gcm-aes-ccree",
2521		.blocksize = 1,
 
2522		.template_aead = {
2523			.setkey = cc_rfc4106_gcm_setkey,
2524			.setauthsize = cc_rfc4106_gcm_setauthsize,
2525			.encrypt = cc_rfc4106_gcm_encrypt,
2526			.decrypt = cc_rfc4106_gcm_decrypt,
2527			.init = cc_aead_init,
2528			.exit = cc_aead_exit,
2529			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2530			.maxauthsize = AES_BLOCK_SIZE,
2531		},
2532		.cipher_mode = DRV_CIPHER_GCTR,
2533		.flow_mode = S_DIN_to_AES,
2534		.auth_mode = DRV_HASH_NULL,
2535		.min_hw_rev = CC_HW_REV_630,
2536		.std_body = CC_STD_NIST,
2537	},
2538	{
2539		.name = "rfc4543(gcm(aes))",
2540		.driver_name = "rfc4543-gcm-aes-ccree",
2541		.blocksize = 1,
 
2542		.template_aead = {
2543			.setkey = cc_rfc4543_gcm_setkey,
2544			.setauthsize = cc_rfc4543_gcm_setauthsize,
2545			.encrypt = cc_rfc4543_gcm_encrypt,
2546			.decrypt = cc_rfc4543_gcm_decrypt,
2547			.init = cc_aead_init,
2548			.exit = cc_aead_exit,
2549			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2550			.maxauthsize = AES_BLOCK_SIZE,
2551		},
2552		.cipher_mode = DRV_CIPHER_GCTR,
2553		.flow_mode = S_DIN_to_AES,
2554		.auth_mode = DRV_HASH_NULL,
2555		.min_hw_rev = CC_HW_REV_630,
2556		.std_body = CC_STD_NIST,
2557	},
2558};
2559
2560static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2561						struct device *dev)
2562{
2563	struct cc_crypto_alg *t_alg;
2564	struct aead_alg *alg;
2565
2566	t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
2567	if (!t_alg)
2568		return ERR_PTR(-ENOMEM);
2569
2570	alg = &tmpl->template_aead;
2571
2572	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2573		     tmpl->name) >= CRYPTO_MAX_ALG_NAME)
2574		return ERR_PTR(-EINVAL);
2575	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2576		     tmpl->driver_name) >= CRYPTO_MAX_ALG_NAME)
2577		return ERR_PTR(-EINVAL);
2578
2579	alg->base.cra_module = THIS_MODULE;
2580	alg->base.cra_priority = CC_CRA_PRIO;
2581
2582	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
2583	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2584	alg->base.cra_blocksize = tmpl->blocksize;
2585	alg->init = cc_aead_init;
2586	alg->exit = cc_aead_exit;
2587
2588	t_alg->aead_alg = *alg;
2589
2590	t_alg->cipher_mode = tmpl->cipher_mode;
2591	t_alg->flow_mode = tmpl->flow_mode;
2592	t_alg->auth_mode = tmpl->auth_mode;
2593
2594	return t_alg;
2595}
2596
2597int cc_aead_free(struct cc_drvdata *drvdata)
2598{
2599	struct cc_crypto_alg *t_alg, *n;
2600	struct cc_aead_handle *aead_handle = drvdata->aead_handle;
2601
2602	/* Remove registered algs */
2603	list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
2604		crypto_unregister_aead(&t_alg->aead_alg);
2605		list_del(&t_alg->entry);
2606	}
2607
2608	return 0;
2609}
2610
2611int cc_aead_alloc(struct cc_drvdata *drvdata)
2612{
2613	struct cc_aead_handle *aead_handle;
2614	struct cc_crypto_alg *t_alg;
2615	int rc = -ENOMEM;
2616	int alg;
2617	struct device *dev = drvdata_to_dev(drvdata);
2618
2619	aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL);
2620	if (!aead_handle) {
2621		rc = -ENOMEM;
2622		goto fail0;
2623	}
2624
2625	INIT_LIST_HEAD(&aead_handle->aead_list);
2626	drvdata->aead_handle = aead_handle;
2627
2628	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2629							 MAX_HMAC_DIGEST_SIZE);
2630
2631	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2632		rc = -ENOMEM;
2633		goto fail1;
2634	}
2635
2636	/* Linux crypto */
2637	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2638		if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
2639		    !(drvdata->std_bodies & aead_algs[alg].std_body))
2640			continue;
2641
2642		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2643		if (IS_ERR(t_alg)) {
2644			rc = PTR_ERR(t_alg);
2645			dev_err(dev, "%s alg allocation failed\n",
2646				aead_algs[alg].driver_name);
2647			goto fail1;
2648		}
2649		t_alg->drvdata = drvdata;
2650		rc = crypto_register_aead(&t_alg->aead_alg);
2651		if (rc) {
2652			dev_err(dev, "%s alg registration failed\n",
2653				t_alg->aead_alg.base.cra_driver_name);
2654				goto fail1;
2655		}
2656
2657		list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2658		dev_dbg(dev, "Registered %s\n",
2659			t_alg->aead_alg.base.cra_driver_name);
2660	}
2661
2662	return 0;
2663
2664fail1:
2665	cc_aead_free(drvdata);
2666fail0:
2667	return rc;
2668}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
   3
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <crypto/algapi.h>
   7#include <crypto/internal/aead.h>
   8#include <crypto/authenc.h>
   9#include <crypto/des.h>
  10#include <linux/rtnetlink.h>
 
  11#include "cc_driver.h"
  12#include "cc_buffer_mgr.h"
  13#include "cc_aead.h"
  14#include "cc_request_mgr.h"
  15#include "cc_hash.h"
  16#include "cc_sram_mgr.h"
  17
  18#define template_aead	template_u.aead
  19
  20#define MAX_AEAD_SETKEY_SEQ 12
  21#define MAX_AEAD_PROCESS_SEQ 23
  22
  23#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
  24#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
  25
  26#define AES_CCM_RFC4309_NONCE_SIZE 3
  27#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
  28
  29/* Value of each ICV_CMP byte (of 8) in case of success */
  30#define ICV_VERIF_OK 0x01
  31
  32struct cc_aead_handle {
  33	cc_sram_addr_t sram_workspace_addr;
  34	struct list_head aead_list;
  35};
  36
  37struct cc_hmac_s {
  38	u8 *padded_authkey;
  39	u8 *ipad_opad; /* IPAD, OPAD*/
  40	dma_addr_t padded_authkey_dma_addr;
  41	dma_addr_t ipad_opad_dma_addr;
  42};
  43
  44struct cc_xcbc_s {
  45	u8 *xcbc_keys; /* K1,K2,K3 */
  46	dma_addr_t xcbc_keys_dma_addr;
  47};
  48
  49struct cc_aead_ctx {
  50	struct cc_drvdata *drvdata;
  51	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
  52	u8 *enckey;
  53	dma_addr_t enckey_dma_addr;
  54	union {
  55		struct cc_hmac_s hmac;
  56		struct cc_xcbc_s xcbc;
  57	} auth_state;
  58	unsigned int enc_keylen;
  59	unsigned int auth_keylen;
  60	unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
 
  61	enum drv_cipher_mode cipher_mode;
  62	enum cc_flow_mode flow_mode;
  63	enum drv_hash_mode auth_mode;
  64};
  65
  66static inline bool valid_assoclen(struct aead_request *req)
  67{
  68	return ((req->assoclen == 16) || (req->assoclen == 20));
  69}
  70
  71static void cc_aead_exit(struct crypto_aead *tfm)
  72{
  73	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  74	struct device *dev = drvdata_to_dev(ctx->drvdata);
  75
  76	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
  77		crypto_tfm_alg_name(&tfm->base));
  78
  79	/* Unmap enckey buffer */
  80	if (ctx->enckey) {
  81		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
  82				  ctx->enckey_dma_addr);
  83		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
  84			&ctx->enckey_dma_addr);
  85		ctx->enckey_dma_addr = 0;
  86		ctx->enckey = NULL;
  87	}
  88
  89	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
  90		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
  91
  92		if (xcbc->xcbc_keys) {
  93			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
  94					  xcbc->xcbc_keys,
  95					  xcbc->xcbc_keys_dma_addr);
  96		}
  97		dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
  98			&xcbc->xcbc_keys_dma_addr);
  99		xcbc->xcbc_keys_dma_addr = 0;
 100		xcbc->xcbc_keys = NULL;
 101	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
 102		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
 103
 104		if (hmac->ipad_opad) {
 105			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
 106					  hmac->ipad_opad,
 107					  hmac->ipad_opad_dma_addr);
 108			dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
 109				&hmac->ipad_opad_dma_addr);
 110			hmac->ipad_opad_dma_addr = 0;
 111			hmac->ipad_opad = NULL;
 112		}
 113		if (hmac->padded_authkey) {
 114			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
 115					  hmac->padded_authkey,
 116					  hmac->padded_authkey_dma_addr);
 117			dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
 118				&hmac->padded_authkey_dma_addr);
 119			hmac->padded_authkey_dma_addr = 0;
 120			hmac->padded_authkey = NULL;
 121		}
 122	}
 123}
 124
 
 
 
 
 
 
 
 125static int cc_aead_init(struct crypto_aead *tfm)
 126{
 127	struct aead_alg *alg = crypto_aead_alg(tfm);
 128	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 129	struct cc_crypto_alg *cc_alg =
 130			container_of(alg, struct cc_crypto_alg, aead_alg);
 131	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
 132
 133	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
 134		crypto_tfm_alg_name(&tfm->base));
 135
 136	/* Initialize modes in instance */
 137	ctx->cipher_mode = cc_alg->cipher_mode;
 138	ctx->flow_mode = cc_alg->flow_mode;
 139	ctx->auth_mode = cc_alg->auth_mode;
 140	ctx->drvdata = cc_alg->drvdata;
 141	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
 142
 143	/* Allocate key buffer, cache line aligned */
 144	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
 145					 &ctx->enckey_dma_addr, GFP_KERNEL);
 146	if (!ctx->enckey) {
 147		dev_err(dev, "Failed allocating key buffer\n");
 148		goto init_failed;
 149	}
 150	dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
 151		ctx->enckey);
 152
 153	/* Set default authlen value */
 154
 155	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
 156		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
 157		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
 158
 159		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
 160		/* (and temporary for user key - up to 256b) */
 161		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
 162						     &xcbc->xcbc_keys_dma_addr,
 163						     GFP_KERNEL);
 164		if (!xcbc->xcbc_keys) {
 165			dev_err(dev, "Failed allocating buffer for XCBC keys\n");
 166			goto init_failed;
 167		}
 168	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
 169		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
 170		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
 171		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
 172
 173		/* Allocate dma-coherent buffer for IPAD + OPAD */
 174		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
 175						     &hmac->ipad_opad_dma_addr,
 176						     GFP_KERNEL);
 177
 178		if (!hmac->ipad_opad) {
 179			dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
 180			goto init_failed;
 181		}
 182
 183		dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
 184			hmac->ipad_opad);
 185
 186		hmac->padded_authkey = dma_alloc_coherent(dev,
 187							  MAX_HMAC_BLOCK_SIZE,
 188							  pkey_dma,
 189							  GFP_KERNEL);
 190
 191		if (!hmac->padded_authkey) {
 192			dev_err(dev, "failed to allocate padded_authkey\n");
 193			goto init_failed;
 194		}
 195	} else {
 196		ctx->auth_state.hmac.ipad_opad = NULL;
 197		ctx->auth_state.hmac.padded_authkey = NULL;
 198	}
 
 199
 200	return 0;
 201
 202init_failed:
 203	cc_aead_exit(tfm);
 204	return -ENOMEM;
 205}
 206
 207static void cc_aead_complete(struct device *dev, void *cc_req, int err)
 208{
 209	struct aead_request *areq = (struct aead_request *)cc_req;
 210	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 211	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
 212	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 213
 
 
 
 
 214	cc_unmap_aead_request(dev, areq);
 215
 216	/* Restore ordinary iv pointer */
 217	areq->iv = areq_ctx->backup_iv;
 218
 219	if (err)
 220		goto done;
 221
 222	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
 223		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
 224			   ctx->authsize) != 0) {
 225			dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
 226				ctx->authsize, ctx->cipher_mode);
 227			/* In case of payload authentication failure, MUST NOT
 228			 * revealed the decrypted message --> zero its memory.
 229			 */
 230			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
 
 231			err = -EBADMSG;
 232		}
 233	} else { /*ENCRYPT*/
 234		if (areq_ctx->is_icv_fragmented) {
 235			u32 skip = areq->cryptlen + areq_ctx->dst_offset;
 236
 237			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
 238					   areq_ctx->dst_sgl, skip,
 239					   (skip + ctx->authsize),
 240					   CC_SG_FROM_BUF);
 241		}
 242
 243		/* If an IV was generated, copy it back to the user provided
 244		 * buffer.
 245		 */
 246		if (areq_ctx->backup_giv) {
 247			if (ctx->cipher_mode == DRV_CIPHER_CTR)
 248				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
 249				       CTR_RFC3686_NONCE_SIZE,
 250				       CTR_RFC3686_IV_SIZE);
 251			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
 252				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
 253				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
 254		}
 255	}
 256done:
 257	aead_request_complete(areq, err);
 258}
 259
 260static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
 261				struct cc_aead_ctx *ctx)
 262{
 263	/* Load the AES key */
 264	hw_desc_init(&desc[0]);
 265	/* We are using for the source/user key the same buffer
 266	 * as for the output keys, * because after this key loading it
 267	 * is not needed anymore
 268	 */
 269	set_din_type(&desc[0], DMA_DLLI,
 270		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
 271		     NS_BIT);
 272	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
 273	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
 274	set_key_size_aes(&desc[0], ctx->auth_keylen);
 275	set_flow_mode(&desc[0], S_DIN_to_AES);
 276	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
 277
 278	hw_desc_init(&desc[1]);
 279	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
 280	set_flow_mode(&desc[1], DIN_AES_DOUT);
 281	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
 282		      AES_KEYSIZE_128, NS_BIT, 0);
 283
 284	hw_desc_init(&desc[2]);
 285	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
 286	set_flow_mode(&desc[2], DIN_AES_DOUT);
 287	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
 288					 + AES_KEYSIZE_128),
 289			      AES_KEYSIZE_128, NS_BIT, 0);
 290
 291	hw_desc_init(&desc[3]);
 292	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
 293	set_flow_mode(&desc[3], DIN_AES_DOUT);
 294	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
 295					  + 2 * AES_KEYSIZE_128),
 296			      AES_KEYSIZE_128, NS_BIT, 0);
 297
 298	return 4;
 299}
 300
 301static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
 
 302{
 303	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 304	unsigned int digest_ofs = 0;
 305	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 306			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 307	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 308			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
 309	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
 310
 311	unsigned int idx = 0;
 312	int i;
 313
 314	/* calc derived HMAC key */
 315	for (i = 0; i < 2; i++) {
 316		/* Load hash initial state */
 317		hw_desc_init(&desc[idx]);
 318		set_cipher_mode(&desc[idx], hash_mode);
 319		set_din_sram(&desc[idx],
 320			     cc_larval_digest_addr(ctx->drvdata,
 321						   ctx->auth_mode),
 322			     digest_size);
 323		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 324		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 325		idx++;
 326
 327		/* Load the hash current length*/
 328		hw_desc_init(&desc[idx]);
 329		set_cipher_mode(&desc[idx], hash_mode);
 330		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
 331		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 332		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 333		idx++;
 334
 335		/* Prepare ipad key */
 336		hw_desc_init(&desc[idx]);
 337		set_xor_val(&desc[idx], hmac_pad_const[i]);
 338		set_cipher_mode(&desc[idx], hash_mode);
 339		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 340		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 341		idx++;
 342
 343		/* Perform HASH update */
 344		hw_desc_init(&desc[idx]);
 345		set_din_type(&desc[idx], DMA_DLLI,
 346			     hmac->padded_authkey_dma_addr,
 347			     SHA256_BLOCK_SIZE, NS_BIT);
 348		set_cipher_mode(&desc[idx], hash_mode);
 349		set_xor_active(&desc[idx]);
 350		set_flow_mode(&desc[idx], DIN_HASH);
 351		idx++;
 352
 353		/* Get the digset */
 354		hw_desc_init(&desc[idx]);
 355		set_cipher_mode(&desc[idx], hash_mode);
 356		set_dout_dlli(&desc[idx],
 357			      (hmac->ipad_opad_dma_addr + digest_ofs),
 358			      digest_size, NS_BIT, 0);
 359		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 360		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 361		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 362		idx++;
 363
 364		digest_ofs += digest_size;
 365	}
 366
 367	return idx;
 368}
 369
 370static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 371{
 372	struct device *dev = drvdata_to_dev(ctx->drvdata);
 373
 374	dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
 375		ctx->enc_keylen, ctx->auth_keylen);
 376
 377	switch (ctx->auth_mode) {
 378	case DRV_HASH_SHA1:
 379	case DRV_HASH_SHA256:
 380		break;
 381	case DRV_HASH_XCBC_MAC:
 382		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
 383		    ctx->auth_keylen != AES_KEYSIZE_192 &&
 384		    ctx->auth_keylen != AES_KEYSIZE_256)
 385			return -ENOTSUPP;
 386		break;
 387	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
 388		if (ctx->auth_keylen > 0)
 389			return -EINVAL;
 390		break;
 391	default:
 392		dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
 393		return -EINVAL;
 394	}
 395	/* Check cipher key size */
 396	if (ctx->flow_mode == S_DIN_to_DES) {
 397		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
 398			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
 399				ctx->enc_keylen);
 400			return -EINVAL;
 401		}
 402	} else { /* Default assumed to be AES ciphers */
 403		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
 404		    ctx->enc_keylen != AES_KEYSIZE_192 &&
 405		    ctx->enc_keylen != AES_KEYSIZE_256) {
 406			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
 407				ctx->enc_keylen);
 408			return -EINVAL;
 409		}
 410	}
 411
 412	return 0; /* All tests of keys sizes passed */
 413}
 414
 415/* This function prepers the user key so it can pass to the hmac processing
 416 * (copy to intenral buffer or hash in case of key longer than block
 417 */
 418static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
 419				 unsigned int keylen)
 420{
 421	dma_addr_t key_dma_addr = 0;
 422	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 423	struct device *dev = drvdata_to_dev(ctx->drvdata);
 424	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
 425	struct cc_crypto_req cc_req = {};
 426	unsigned int blocksize;
 427	unsigned int digestsize;
 428	unsigned int hashmode;
 429	unsigned int idx = 0;
 430	int rc = 0;
 
 431	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 432	dma_addr_t padded_authkey_dma_addr =
 433		ctx->auth_state.hmac.padded_authkey_dma_addr;
 434
 435	switch (ctx->auth_mode) { /* auth_key required and >0 */
 436	case DRV_HASH_SHA1:
 437		blocksize = SHA1_BLOCK_SIZE;
 438		digestsize = SHA1_DIGEST_SIZE;
 439		hashmode = DRV_HASH_HW_SHA1;
 440		break;
 441	case DRV_HASH_SHA256:
 442	default:
 443		blocksize = SHA256_BLOCK_SIZE;
 444		digestsize = SHA256_DIGEST_SIZE;
 445		hashmode = DRV_HASH_HW_SHA256;
 446	}
 447
 448	if (keylen != 0) {
 449		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
 450					      DMA_TO_DEVICE);
 
 
 
 
 451		if (dma_mapping_error(dev, key_dma_addr)) {
 452			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 453				key, keylen);
 
 454			return -ENOMEM;
 455		}
 456		if (keylen > blocksize) {
 457			/* Load hash initial state */
 458			hw_desc_init(&desc[idx]);
 459			set_cipher_mode(&desc[idx], hashmode);
 
 
 460			set_din_sram(&desc[idx], larval_addr, digestsize);
 461			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 462			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 463			idx++;
 464
 465			/* Load the hash current length*/
 466			hw_desc_init(&desc[idx]);
 467			set_cipher_mode(&desc[idx], hashmode);
 468			set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
 469			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 470			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 471			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 472			idx++;
 473
 474			hw_desc_init(&desc[idx]);
 475			set_din_type(&desc[idx], DMA_DLLI,
 476				     key_dma_addr, keylen, NS_BIT);
 477			set_flow_mode(&desc[idx], DIN_HASH);
 478			idx++;
 479
 480			/* Get hashed key */
 481			hw_desc_init(&desc[idx]);
 482			set_cipher_mode(&desc[idx], hashmode);
 483			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 484				      digestsize, NS_BIT, 0);
 485			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 486			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 487			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 488			set_cipher_config0(&desc[idx],
 489					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 490			idx++;
 491
 492			hw_desc_init(&desc[idx]);
 493			set_din_const(&desc[idx], 0, (blocksize - digestsize));
 494			set_flow_mode(&desc[idx], BYPASS);
 495			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
 496				      digestsize), (blocksize - digestsize),
 497				      NS_BIT, 0);
 498			idx++;
 499		} else {
 500			hw_desc_init(&desc[idx]);
 501			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
 502				     keylen, NS_BIT);
 503			set_flow_mode(&desc[idx], BYPASS);
 504			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 505				      keylen, NS_BIT, 0);
 506			idx++;
 507
 508			if ((blocksize - keylen) != 0) {
 509				hw_desc_init(&desc[idx]);
 510				set_din_const(&desc[idx], 0,
 511					      (blocksize - keylen));
 512				set_flow_mode(&desc[idx], BYPASS);
 513				set_dout_dlli(&desc[idx],
 514					      (padded_authkey_dma_addr +
 515					       keylen),
 516					      (blocksize - keylen), NS_BIT, 0);
 517				idx++;
 518			}
 519		}
 520	} else {
 521		hw_desc_init(&desc[idx]);
 522		set_din_const(&desc[idx], 0, (blocksize - keylen));
 523		set_flow_mode(&desc[idx], BYPASS);
 524		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 525			      blocksize, NS_BIT, 0);
 526		idx++;
 527	}
 528
 529	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 530	if (rc)
 531		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 532
 533	if (key_dma_addr)
 534		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 535
 
 
 536	return rc;
 537}
 538
 539static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 540			  unsigned int keylen)
 541{
 542	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 543	struct rtattr *rta = (struct rtattr *)key;
 544	struct cc_crypto_req cc_req = {};
 545	struct crypto_authenc_key_param *param;
 546	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 547	int rc = -EINVAL;
 548	unsigned int seq_len = 0;
 549	struct device *dev = drvdata_to_dev(ctx->drvdata);
 
 
 550
 551	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
 552		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
 553
 554	/* STAT_PHASE_0: Init and sanity checks */
 555
 556	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
 557		if (!RTA_OK(rta, keylen))
 558			goto badkey;
 559		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
 560			goto badkey;
 561		if (RTA_PAYLOAD(rta) < sizeof(*param))
 562			goto badkey;
 563		param = RTA_DATA(rta);
 564		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
 565		key += RTA_ALIGN(rta->rta_len);
 566		keylen -= RTA_ALIGN(rta->rta_len);
 567		if (keylen < ctx->enc_keylen)
 568			goto badkey;
 569		ctx->auth_keylen = keylen - ctx->enc_keylen;
 570
 571		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 572			/* the nonce is stored in bytes at end of key */
 573			if (ctx->enc_keylen <
 574			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
 575				goto badkey;
 576			/* Copy nonce from last 4 bytes in CTR key to
 577			 *  first 4 bytes in CTR IV
 578			 */
 579			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
 580			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
 581			       CTR_RFC3686_NONCE_SIZE);
 582			/* Set CTR key size */
 583			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 584		}
 585	} else { /* non-authenc - has just one key */
 
 
 586		ctx->enc_keylen = keylen;
 587		ctx->auth_keylen = 0;
 588	}
 589
 590	rc = validate_keys_sizes(ctx);
 591	if (rc)
 592		goto badkey;
 593
 594	/* STAT_PHASE_1: Copy key to ctx */
 595
 596	/* Get key material */
 597	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
 598	if (ctx->enc_keylen == 24)
 599		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
 600	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 601		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
 
 602	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
 603		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
 604		if (rc)
 605			goto badkey;
 606	}
 607
 608	/* STAT_PHASE_2: Create sequence */
 609
 610	switch (ctx->auth_mode) {
 611	case DRV_HASH_SHA1:
 612	case DRV_HASH_SHA256:
 613		seq_len = hmac_setkey(desc, ctx);
 614		break;
 615	case DRV_HASH_XCBC_MAC:
 616		seq_len = xcbc_setkey(desc, ctx);
 617		break;
 618	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
 619		break; /* No auth. key setup */
 620	default:
 621		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
 622		rc = -ENOTSUPP;
 623		goto badkey;
 624	}
 625
 626	/* STAT_PHASE_3: Submit sequence to HW */
 627
 628	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
 629		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
 630		if (rc) {
 631			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 632			goto setkey_error;
 633		}
 634	}
 635
 636	/* Update STAT_PHASE_3 */
 637	return rc;
 
 
 
 
 
 
 
 
 
 
 
 638
 639badkey:
 640	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 641
 642setkey_error:
 643	return rc;
 644}
 645
 646static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
 647				 unsigned int keylen)
 648{
 649	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 650
 651	if (keylen < 3)
 652		return -EINVAL;
 653
 654	keylen -= 3;
 655	memcpy(ctx->ctr_nonce, key + keylen, 3);
 656
 657	return cc_aead_setkey(tfm, key, keylen);
 658}
 659
 660static int cc_aead_setauthsize(struct crypto_aead *authenc,
 661			       unsigned int authsize)
 662{
 663	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
 664	struct device *dev = drvdata_to_dev(ctx->drvdata);
 665
 666	/* Unsupported auth. sizes */
 667	if (authsize == 0 ||
 668	    authsize > crypto_aead_maxauthsize(authenc)) {
 669		return -ENOTSUPP;
 670	}
 671
 672	ctx->authsize = authsize;
 673	dev_dbg(dev, "authlen=%d\n", ctx->authsize);
 674
 675	return 0;
 676}
 677
 678static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
 679				      unsigned int authsize)
 680{
 681	switch (authsize) {
 682	case 8:
 683	case 12:
 684	case 16:
 685		break;
 686	default:
 687		return -EINVAL;
 688	}
 689
 690	return cc_aead_setauthsize(authenc, authsize);
 691}
 692
 693static int cc_ccm_setauthsize(struct crypto_aead *authenc,
 694			      unsigned int authsize)
 695{
 696	switch (authsize) {
 697	case 4:
 698	case 6:
 699	case 8:
 700	case 10:
 701	case 12:
 702	case 14:
 703	case 16:
 704		break;
 705	default:
 706		return -EINVAL;
 707	}
 708
 709	return cc_aead_setauthsize(authenc, authsize);
 710}
 711
 712static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
 713			      struct cc_hw_desc desc[], unsigned int *seq_size)
 714{
 715	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 716	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 717	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 718	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
 719	unsigned int idx = *seq_size;
 720	struct device *dev = drvdata_to_dev(ctx->drvdata);
 721
 722	switch (assoc_dma_type) {
 723	case CC_DMA_BUF_DLLI:
 724		dev_dbg(dev, "ASSOC buffer type DLLI\n");
 725		hw_desc_init(&desc[idx]);
 726		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
 727			     areq->assoclen, NS_BIT);
 728		set_flow_mode(&desc[idx], flow_mode);
 729		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
 730		    areq_ctx->cryptlen > 0)
 731			set_din_not_last_indication(&desc[idx]);
 732		break;
 733	case CC_DMA_BUF_MLLI:
 734		dev_dbg(dev, "ASSOC buffer type MLLI\n");
 735		hw_desc_init(&desc[idx]);
 736		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
 737			     areq_ctx->assoc.mlli_nents, NS_BIT);
 738		set_flow_mode(&desc[idx], flow_mode);
 739		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
 740		    areq_ctx->cryptlen > 0)
 741			set_din_not_last_indication(&desc[idx]);
 742		break;
 743	case CC_DMA_BUF_NULL:
 744	default:
 745		dev_err(dev, "Invalid ASSOC buffer type\n");
 746	}
 747
 748	*seq_size = (++idx);
 749}
 750
 751static void cc_proc_authen_desc(struct aead_request *areq,
 752				unsigned int flow_mode,
 753				struct cc_hw_desc desc[],
 754				unsigned int *seq_size, int direct)
 755{
 756	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 757	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
 758	unsigned int idx = *seq_size;
 759	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 760	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 761	struct device *dev = drvdata_to_dev(ctx->drvdata);
 762
 763	switch (data_dma_type) {
 764	case CC_DMA_BUF_DLLI:
 765	{
 766		struct scatterlist *cipher =
 767			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 768			areq_ctx->dst_sgl : areq_ctx->src_sgl;
 769
 770		unsigned int offset =
 771			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 772			areq_ctx->dst_offset : areq_ctx->src_offset;
 773		dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
 774		hw_desc_init(&desc[idx]);
 775		set_din_type(&desc[idx], DMA_DLLI,
 776			     (sg_dma_address(cipher) + offset),
 777			     areq_ctx->cryptlen, NS_BIT);
 778		set_flow_mode(&desc[idx], flow_mode);
 779		break;
 780	}
 781	case CC_DMA_BUF_MLLI:
 782	{
 783		/* DOUBLE-PASS flow (the default):
 784		 * assoc. + iv + data are compacted into one table;
 785		 * if assoclen is zero, only the IV is processed.
 786		 */
 787		cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
 788		u32 mlli_nents = areq_ctx->assoc.mlli_nents;
 789
 790		if (areq_ctx->is_single_pass) {
 791			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 792				mlli_addr = areq_ctx->dst.sram_addr;
 793				mlli_nents = areq_ctx->dst.mlli_nents;
 794			} else {
 795				mlli_addr = areq_ctx->src.sram_addr;
 796				mlli_nents = areq_ctx->src.mlli_nents;
 797			}
 798		}
 799
 800		dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
 801		hw_desc_init(&desc[idx]);
 802		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
 803			     NS_BIT);
 804		set_flow_mode(&desc[idx], flow_mode);
 805		break;
 806	}
 813}
 814
 815static void cc_proc_cipher_desc(struct aead_request *areq,
 816				unsigned int flow_mode,
 817				struct cc_hw_desc desc[],
 818				unsigned int *seq_size)
 819{
 820	unsigned int idx = *seq_size;
 821	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 822	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
 823	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 824	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 825	struct device *dev = drvdata_to_dev(ctx->drvdata);
 826
 827	if (areq_ctx->cryptlen == 0)
 828		return; /*null processing*/
 829
 830	switch (data_dma_type) {
 831	case CC_DMA_BUF_DLLI:
 832		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
 833		hw_desc_init(&desc[idx]);
 834		set_din_type(&desc[idx], DMA_DLLI,
 835			     (sg_dma_address(areq_ctx->src_sgl) +
 836			      areq_ctx->src_offset), areq_ctx->cryptlen,
 837			      NS_BIT);
 838		set_dout_dlli(&desc[idx],
 839			      (sg_dma_address(areq_ctx->dst_sgl) +
 840			       areq_ctx->dst_offset),
 841			      areq_ctx->cryptlen, NS_BIT, 0);
 842		set_flow_mode(&desc[idx], flow_mode);
 843		break;
 844	case CC_DMA_BUF_MLLI:
 845		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
 846		hw_desc_init(&desc[idx]);
 847		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
 848			     areq_ctx->src.mlli_nents, NS_BIT);
 849		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
 850			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
 851		set_flow_mode(&desc[idx], flow_mode);
 852		break;
 853	case CC_DMA_BUF_NULL:
 854	default:
 855		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
 856	}
 857
 858	*seq_size = (++idx);
 859}
 860
 861static void cc_proc_digest_desc(struct aead_request *req,
 862				struct cc_hw_desc desc[],
 863				unsigned int *seq_size)
 864{
 865	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 866	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 867	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 868	unsigned int idx = *seq_size;
 869	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 870				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 871	int direct = req_ctx->gen_ctx.op_type;
 872
 873	/* Get final ICV result */
 874	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 875		hw_desc_init(&desc[idx]);
 876		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 877		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 878		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
 879			      NS_BIT, 1);
 880		set_queue_last_ind(ctx->drvdata, &desc[idx]);
 881		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 882			set_aes_not_hash_mode(&desc[idx]);
 883			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
 884		} else {
 885			set_cipher_config0(&desc[idx],
 886					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 887			set_cipher_mode(&desc[idx], hash_mode);
 888		}
 889	} else { /*Decrypt*/
 890		/* Get ICV out from hardware */
 891		hw_desc_init(&desc[idx]);
 892		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 893		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 894		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
 895			      ctx->authsize, NS_BIT, 1);
 896		set_queue_last_ind(ctx->drvdata, &desc[idx]);
 897		set_cipher_config0(&desc[idx],
 898				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 899		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 900		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 901			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
 902			set_aes_not_hash_mode(&desc[idx]);
 903		} else {
 904			set_cipher_mode(&desc[idx], hash_mode);
 905		}
 906	}
 907
 908	*seq_size = (++idx);
 909}
 910
 911static void cc_set_cipher_desc(struct aead_request *req,
 912			       struct cc_hw_desc desc[],
 913			       unsigned int *seq_size)
 914{
 915	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 916	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 917	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 918	unsigned int hw_iv_size = req_ctx->hw_iv_size;
 919	unsigned int idx = *seq_size;
 920	int direct = req_ctx->gen_ctx.op_type;
 921
 922	/* Setup cipher state */
 923	hw_desc_init(&desc[idx]);
 924	set_cipher_config0(&desc[idx], direct);
 925	set_flow_mode(&desc[idx], ctx->flow_mode);
 926	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
 927		     hw_iv_size, NS_BIT);
 928	if (ctx->cipher_mode == DRV_CIPHER_CTR)
 929		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 930	else
 931		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 932	set_cipher_mode(&desc[idx], ctx->cipher_mode);
 933	idx++;
 934
 935	/* Setup enc. key */
 936	hw_desc_init(&desc[idx]);
 937	set_cipher_config0(&desc[idx], direct);
 938	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 939	set_flow_mode(&desc[idx], ctx->flow_mode);
 940	if (ctx->flow_mode == S_DIN_to_AES) {
 941		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
 942			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
 943			      ctx->enc_keylen), NS_BIT);
 944		set_key_size_aes(&desc[idx], ctx->enc_keylen);
 945	} else {
 946		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
 947			     ctx->enc_keylen, NS_BIT);
 948		set_key_size_des(&desc[idx], ctx->enc_keylen);
 949	}
 950	set_cipher_mode(&desc[idx], ctx->cipher_mode);
 951	idx++;
 952
 953	*seq_size = idx;
 954}
 955
 956static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
 957			   unsigned int *seq_size, unsigned int data_flow_mode)
 958{
 959	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 960	int direct = req_ctx->gen_ctx.op_type;
 961	unsigned int idx = *seq_size;
 962
 963	if (req_ctx->cryptlen == 0)
 964		return; /*null processing*/
 965
 966	cc_set_cipher_desc(req, desc, &idx);
 967	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
 968	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 969		/* We must wait for DMA to write all cipher */
 970		hw_desc_init(&desc[idx]);
 971		set_din_no_dma(&desc[idx], 0, 0xfffff0);
 972		set_dout_no_dma(&desc[idx], 0, 0, 1);
 973		idx++;
 974	}
 975
 976	*seq_size = idx;
 977}
 978
 979static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
 980			     unsigned int *seq_size)
 981{
 982	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 983	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 984	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 985				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 986	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 987				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
 988	unsigned int idx = *seq_size;
 989
 990	/* Loading hash ipad xor key state */
 991	hw_desc_init(&desc[idx]);
 992	set_cipher_mode(&desc[idx], hash_mode);
 993	set_din_type(&desc[idx], DMA_DLLI,
 994		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
 995		     NS_BIT);
 996	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 997	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 998	idx++;
 999
1000	/* Load init. digest len (64 bytes) */
1001	hw_desc_init(&desc[idx]);
1002	set_cipher_mode(&desc[idx], hash_mode);
1003	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1004		     ctx->drvdata->hash_len_sz);
1005	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1006	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1007	idx++;
1008
1009	*seq_size = idx;
1010}
1011
1012static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
1013			     unsigned int *seq_size)
1014{
1015	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1016	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1017	unsigned int idx = *seq_size;
1018
1019	/* Loading MAC state */
1020	hw_desc_init(&desc[idx]);
1021	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1022	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1023	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1024	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1025	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1026	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1027	set_aes_not_hash_mode(&desc[idx]);
1028	idx++;
1029
1030	/* Setup XCBC MAC K1 */
1031	hw_desc_init(&desc[idx]);
1032	set_din_type(&desc[idx], DMA_DLLI,
1033		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1034		     AES_KEYSIZE_128, NS_BIT);
1035	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1036	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1037	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1038	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1039	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1040	set_aes_not_hash_mode(&desc[idx]);
1041	idx++;
1042
1043	/* Setup XCBC MAC K2 */
1044	hw_desc_init(&desc[idx]);
1045	set_din_type(&desc[idx], DMA_DLLI,
1046		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1047		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1048	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1049	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1050	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1051	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1052	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1053	set_aes_not_hash_mode(&desc[idx]);
1054	idx++;
1055
1056	/* Setup XCBC MAC K3 */
1057	hw_desc_init(&desc[idx]);
1058	set_din_type(&desc[idx], DMA_DLLI,
1059		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1060		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1061	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
1062	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1063	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1064	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1065	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1066	set_aes_not_hash_mode(&desc[idx]);
1067	idx++;
1068
1069	*seq_size = idx;
1070}
1071
1072static void cc_proc_header_desc(struct aead_request *req,
1073				struct cc_hw_desc desc[],
1074				unsigned int *seq_size)
1075{
1076	unsigned int idx = *seq_size;
1077	/* Hash associated data */
1078	if (req->assoclen > 0)
1079		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1080
1081	/* Hash IV */
1082	*seq_size = idx;
1083}
1084
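/*
 * HMAC finalization scheme: pad and flush the inner hash, park the inner
 * digest in the SRAM workspace, reload the opad state and the initial
 * digest length, then hash the inner digest from the workspace so the
 * engine produces HMAC = H(K ^ opad || H(K ^ ipad || message)).
 */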
1085static void cc_proc_scheme_desc(struct aead_request *req,
1086				struct cc_hw_desc desc[],
1087				unsigned int *seq_size)
1088{
1089	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1090	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1091	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
1092	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1093				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1094	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1095				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1096	unsigned int idx = *seq_size;
1097
1098	hw_desc_init(&desc[idx]);
1099	set_cipher_mode(&desc[idx], hash_mode);
1100	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1101		      ctx->drvdata->hash_len_sz);
1102	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1103	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
1104	set_cipher_do(&desc[idx], DO_PAD);
1105	idx++;
1106
1107	/* Get final ICV result */
1108	hw_desc_init(&desc[idx]);
1109	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1110		      digest_size);
1111	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1112	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1113	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1114	set_cipher_mode(&desc[idx], hash_mode);
1115	idx++;
1116
1117	/* Loading hash opad xor key state */
1118	hw_desc_init(&desc[idx]);
1119	set_cipher_mode(&desc[idx], hash_mode);
1120	set_din_type(&desc[idx], DMA_DLLI,
1121		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1122		     digest_size, NS_BIT);
1123	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1124	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1125	idx++;
1126
1127	/* Load init. digest len (64 bytes) */
1128	hw_desc_init(&desc[idx]);
1129	set_cipher_mode(&desc[idx], hash_mode);
1130	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1131		     ctx->drvdata->hash_len_sz);
1132	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1133	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1134	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1135	idx++;
1136
1137	/* Perform HASH update */
1138	hw_desc_init(&desc[idx]);
1139	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
1140		     digest_size);
1141	set_flow_mode(&desc[idx], DIN_HASH);
1142	idx++;
1143
1144	*seq_size = idx;
1145}
1146
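/*
 * If any request buffer is described by an MLLI table (or the double-pass
 * flow is used), the table built in host memory must first be copied into
 * the engine SRAM with a BYPASS descriptor so that later descriptors can
 * reference it by its SRAM address.
 */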
1147static void cc_mlli_to_sram(struct aead_request *req,
1148			    struct cc_hw_desc desc[], unsigned int *seq_size)
1149{
1150	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1151	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1152	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1153	struct device *dev = drvdata_to_dev(ctx->drvdata);
1154
1155	if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1156	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
1157	    !req_ctx->is_single_pass) {
1158		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1159			(unsigned int)ctx->drvdata->mlli_sram_addr,
1160			req_ctx->mlli_params.mlli_len);
1161		/* Copy MLLI table host-to-sram */
1162		hw_desc_init(&desc[*seq_size]);
1163		set_din_type(&desc[*seq_size], DMA_DLLI,
1164			     req_ctx->mlli_params.mlli_dma_addr,
1165			     req_ctx->mlli_params.mlli_len, NS_BIT);
1166		set_dout_sram(&desc[*seq_size],
1167			      ctx->drvdata->mlli_sram_addr,
1168			      req_ctx->mlli_params.mlli_len);
1169		set_flow_mode(&desc[*seq_size], BYPASS);
1170		(*seq_size)++;
1171	}
1172}
1173
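/*
 * Pick the data flow for the payload pass: in single-pass encrypt the
 * cipher output is forked into the hash engine (AES/DES_to_HASH_and_DOUT),
 * in single-pass decrypt the ciphertext is fed to cipher and hash in
 * parallel (AES/DES_and_HASH), and in the double-pass fallback the payload
 * only goes through the cipher (DIN_AES/DES_DOUT) since authentication is
 * performed in a separate pass.
 */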
1174static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
1175					  enum cc_flow_mode setup_flow_mode,
1176					  bool is_single_pass)
1177{
1178	enum cc_flow_mode data_flow_mode;
1179
1180	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1181		if (setup_flow_mode == S_DIN_to_AES)
1182			data_flow_mode = is_single_pass ?
1183				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1184		else
1185			data_flow_mode = is_single_pass ?
1186				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1187	} else { /* Decrypt */
1188		if (setup_flow_mode == S_DIN_to_AES)
1189			data_flow_mode = is_single_pass ?
1190				AES_and_HASH : DIN_AES_DOUT;
1191		else
1192			data_flow_mode = is_single_pass ?
1193				DES_and_HASH : DIN_DES_DOUT;
1194	}
1195
1196	return data_flow_mode;
1197}
1198
1199static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1200			    unsigned int *seq_size)
1201{
1202	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1203	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1204	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1205	int direct = req_ctx->gen_ctx.op_type;
1206	unsigned int data_flow_mode =
1207		cc_get_data_flow(direct, ctx->flow_mode,
1208				 req_ctx->is_single_pass);
1209
1210	if (req_ctx->is_single_pass) {
1211		/**
1212		 * Single-pass flow
1213		 */
1214		cc_set_hmac_desc(req, desc, seq_size);
1215		cc_set_cipher_desc(req, desc, seq_size);
1216		cc_proc_header_desc(req, desc, seq_size);
1217		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1218		cc_proc_scheme_desc(req, desc, seq_size);
1219		cc_proc_digest_desc(req, desc, seq_size);
1220		return;
1221	}
1222
1223	/**
1224	 * Double-pass flow
1225	 * Fallback for requests that cannot use the single-pass flow,
1226	 * e.g. when the assoc. data length is not a multiple of a word.
1227	 */
1228	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1229		/* encrypt first.. */
1230		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1231		/* authenc after..*/
1232		cc_set_hmac_desc(req, desc, seq_size);
1233		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1234		cc_proc_scheme_desc(req, desc, seq_size);
1235		cc_proc_digest_desc(req, desc, seq_size);
1236
1237	} else { /*DECRYPT*/
1238		/* authenc first..*/
1239		cc_set_hmac_desc(req, desc, seq_size);
1240		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1241		cc_proc_scheme_desc(req, desc, seq_size);
1242		/* decrypt after.. */
1243		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1244		/* Read the digest result with the completion bit set;
1245		 * this must come after the cipher operation.
1246		 */
1247		cc_proc_digest_desc(req, desc, seq_size);
1248	}
1249}
1250
1251static void
1252cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1253		unsigned int *seq_size)
1254{
1255	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1256	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1257	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1258	int direct = req_ctx->gen_ctx.op_type;
1259	unsigned int data_flow_mode =
1260		cc_get_data_flow(direct, ctx->flow_mode,
1261				 req_ctx->is_single_pass);
1262
1263	if (req_ctx->is_single_pass) {
1264		/**
1265		 * Single-pass flow
1266		 */
1267		cc_set_xcbc_desc(req, desc, seq_size);
1268		cc_set_cipher_desc(req, desc, seq_size);
1269		cc_proc_header_desc(req, desc, seq_size);
1270		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1271		cc_proc_digest_desc(req, desc, seq_size);
1272		return;
1273	}
1274
1275	/**
1276	 * Double-pass flow
1277	 * Fallback for requests that cannot use the single-pass flow,
1278	 * e.g. when the assoc. data length is not a multiple of a word.
1279	 */
1280	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1281		/* encrypt first.. */
1282		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1283		/* authenc after.. */
1284		cc_set_xcbc_desc(req, desc, seq_size);
1285		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1286		cc_proc_digest_desc(req, desc, seq_size);
1287	} else { /*DECRYPT*/
1288		/* authenc first.. */
1289		cc_set_xcbc_desc(req, desc, seq_size);
1290		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1291		/* decrypt after..*/
1292		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1293		/* Read the digest result with the completion bit set;
1294		 * this must come after the cipher operation.
1295		 */
1296		cc_proc_digest_desc(req, desc, seq_size);
1297	}
1298}
1299
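/*
 * Reject request geometries the hardware cannot handle at all and decide
 * whether the fast single-pass flow can be used; e.g. CBC payloads must be
 * block aligned, while a non-word-aligned assoclen (or CTR payload length)
 * forces the double-pass flow.
 */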
1300static int validate_data_size(struct cc_aead_ctx *ctx,
1301			      enum drv_crypto_direction direct,
1302			      struct aead_request *req)
1303{
1304	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1305	struct device *dev = drvdata_to_dev(ctx->drvdata);
1306	unsigned int assoclen = req->assoclen;
1307	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1308			(req->cryptlen - ctx->authsize) : req->cryptlen;
1309
1310	if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1311	    req->cryptlen < ctx->authsize)
1312		goto data_size_err;
1313
1314	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1315
1316	switch (ctx->flow_mode) {
1317	case S_DIN_to_AES:
1318		if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1319		    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1320			goto data_size_err;
1321		if (ctx->cipher_mode == DRV_CIPHER_CCM)
1322			break;
1323		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1324			if (areq_ctx->plaintext_authenticate_only)
1325				areq_ctx->is_single_pass = false;
1326			break;
1327		}
1328
1329		if (!IS_ALIGNED(assoclen, sizeof(u32)))
1330			areq_ctx->is_single_pass = false;
1331
1332		if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1333		    !IS_ALIGNED(cipherlen, sizeof(u32)))
1334			areq_ctx->is_single_pass = false;
1335
1336		break;
1337	case S_DIN_to_DES:
1338		if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1339			goto data_size_err;
1340		if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1341			areq_ctx->is_single_pass = false;
1342		break;
1343	default:
1344		dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1345		goto data_size_err;
1346	}
1347
1348	return 0;
1349
1350data_size_err:
1351	return -EINVAL;
1352}
1353
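/*
 * Encode l(a), the associated-data length, as defined by RFC 3610 /
 * NIST SP 800-38C: lengths below 0xff00 use a 2-byte big-endian field,
 * larger ones use the 0xff 0xfe marker followed by a 4-byte big-endian
 * field. E.g. header_size = 24 is encoded as 0x00 0x18 and the function
 * returns 2.
 */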
1354static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1355{
1356	unsigned int len = 0;
1357
1358	if (header_size == 0)
1359		return 0;
1360
1361	if (header_size < ((1UL << 16) - (1UL << 8))) {
1362		len = 2;
1363
1364		pa0_buff[0] = (header_size >> 8) & 0xFF;
1365		pa0_buff[1] = header_size & 0xFF;
1366	} else {
1367		len = 6;
1368
1369		pa0_buff[0] = 0xFF;
1370		pa0_buff[1] = 0xFE;
1371		pa0_buff[2] = (header_size >> 24) & 0xFF;
1372		pa0_buff[3] = (header_size >> 16) & 0xFF;
1373		pa0_buff[4] = (header_size >> 8) & 0xFF;
1374		pa0_buff[5] = header_size & 0xFF;
1375	}
1376
1377	return len;
1378}
1379
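/*
 * Write msglen big-endian into the last (up to four) bytes of a csize-byte
 * field that is zeroed first; e.g. csize = 4 and msglen = 0x012345 yields
 * the bytes 00 01 23 45. Returns -EOVERFLOW if msglen is too large for a
 * csize-byte field.
 */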
1380static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1381{
1382	__be32 data;
1383
1384	memset(block, 0, csize);
1385	block += csize;
1386
1387	if (csize >= 4)
1388		csize = 4;
1389	else if (msglen > (1 << (8 * csize)))
1390		return -EOVERFLOW;
1391
1392	data = cpu_to_be32(msglen);
1393	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1394
1395	return 0;
1396}
1397
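/*
 * CCM: CTR mode provides confidentiality while CBC-MAC over the formatted
 * header blocks and the payload provides authentication; the resulting
 * CBC-MAC value is finally encrypted with the A0 counter block (counter
 * value 0, ccm_iv0) to produce the tag.
 */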
1398static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1399		  unsigned int *seq_size)
1400{
1401	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1402	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1403	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1404	unsigned int idx = *seq_size;
1405	unsigned int cipher_flow_mode;
1406	dma_addr_t mac_result;
1407
1408	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1409		cipher_flow_mode = AES_to_HASH_and_DOUT;
1410		mac_result = req_ctx->mac_buf_dma_addr;
1411	} else { /* Encrypt */
1412		cipher_flow_mode = AES_and_HASH;
1413		mac_result = req_ctx->icv_dma_addr;
1414	}
1415
1416	/* load key */
1417	hw_desc_init(&desc[idx]);
1418	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1419	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1420		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1421		      ctx->enc_keylen), NS_BIT);
1422	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1423	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1424	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1425	set_flow_mode(&desc[idx], S_DIN_to_AES);
1426	idx++;
1427
1428	/* load ctr state */
1429	hw_desc_init(&desc[idx]);
1430	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1431	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1432	set_din_type(&desc[idx], DMA_DLLI,
1433		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1434	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1435	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1436	set_flow_mode(&desc[idx], S_DIN_to_AES);
1437	idx++;
1438
1439	/* load MAC key */
1440	hw_desc_init(&desc[idx]);
1441	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1442	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1443		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1444		      ctx->enc_keylen), NS_BIT);
1445	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1446	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1447	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1448	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1449	set_aes_not_hash_mode(&desc[idx]);
1450	idx++;
1451
1452	/* load MAC state */
1453	hw_desc_init(&desc[idx]);
1454	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1455	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1456	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1457		     AES_BLOCK_SIZE, NS_BIT);
1458	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1459	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1460	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1461	set_aes_not_hash_mode(&desc[idx]);
1462	idx++;
1463
1464	/* process assoc data */
1465	if (req->assoclen > 0) {
1466		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1467	} else {
1468		hw_desc_init(&desc[idx]);
1469		set_din_type(&desc[idx], DMA_DLLI,
1470			     sg_dma_address(&req_ctx->ccm_adata_sg),
1471			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1472		set_flow_mode(&desc[idx], DIN_HASH);
1473		idx++;
1474	}
1475
1476	/* process the cipher */
1477	if (req_ctx->cryptlen)
1478		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1479
1480	/* Read temporal MAC */
1481	hw_desc_init(&desc[idx]);
1482	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1483	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1484		      NS_BIT, 0);
1485	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1486	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1487	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1488	set_aes_not_hash_mode(&desc[idx]);
1489	idx++;
1490
1491	/* load AES-CTR state (for the last MAC calculation) */
1492	hw_desc_init(&desc[idx]);
1493	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1494	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1495	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1496		     AES_BLOCK_SIZE, NS_BIT);
1497	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1498	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1499	set_flow_mode(&desc[idx], S_DIN_to_AES);
1500	idx++;
1501
1502	hw_desc_init(&desc[idx]);
1503	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1504	set_dout_no_dma(&desc[idx], 0, 0, 1);
1505	idx++;
1506
1507	/* encrypt the "T" value and store MAC in mac_state */
1508	hw_desc_init(&desc[idx]);
1509	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1510		     ctx->authsize, NS_BIT);
1511	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1512	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1513	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1514	idx++;
1515
1516	*seq_size = idx;
1517	return 0;
1518}
1519
1520static int config_ccm_adata(struct aead_request *req)
1521{
1522	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1523	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1524	struct device *dev = drvdata_to_dev(ctx->drvdata);
1525	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1526	//unsigned int size_of_a = 0, rem_a_size = 0;
1527	unsigned int lp = req->iv[0];
1528	/* Note: The code assumes that req->iv[0] already contains the value
1529	 * of L' of RFC 3610
1530	 */
1531	unsigned int l = lp + 1;  /* This is L of RFC 3610. */
1532	unsigned int m = ctx->authsize;  /* This is M of RFC 3610. */
1533	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1534	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1535	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1536	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1537				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1538				req->cryptlen :
1539				(req->cryptlen - ctx->authsize);
1540	int rc;
1541
1542	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1543	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1544
1545	/* taken from crypto/ccm.c */
1546	/* 2 <= L <= 8, so 1 <= L' <= 7. */
1547	if (l < 2 || l > 8) {
1548		dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1549		return -EINVAL;
1550	}
1551	memcpy(b0, req->iv, AES_BLOCK_SIZE);
1552
1553	/* format control info per RFC 3610 and
1554	 * NIST Special Publication 800-38C
1555	 */
1556	*b0 |= (8 * ((m - 2) / 2));
1557	if (req->assoclen > 0)
1558		*b0 |= 64;  /* Enable bit 6 if Adata exists. */
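	/*
	 * E.g. with authsize M = 8, a non-empty Adata and iv[0] = L' = 3,
	 * the B0 flags octet becomes 0x40 | (3 << 3) | 0x03 = 0x5b.
	 */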
1559
1560	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write the l-byte length field. */
1561	if (rc) {
1562		dev_err(dev, "message len overflow detected");
1563		return rc;
1564	}
1565	 /* END of "taken from crypto/ccm.c" */
1566
1567	/* l(a) - size of associated data. */
1568	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);
1569
1570	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1571	req->iv[15] = 1;
1572
1573	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1574	ctr_count_0[15] = 0;
1575
1576	return 0;
1577}
1578
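/*
 * RFC 4309 (CCM for ESP): rebuild the CCM nonce from the 3-byte salt kept
 * from setkey plus the 8-byte per-request IV, and set the length-field
 * flag to L' = 3 (a 4-byte message length). The 8 IV bytes that the caller
 * counts as part of the associated data are dropped from assoclen.
 */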
1579static void cc_proc_rfc4309_ccm(struct aead_request *req)
1580{
1581	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1582	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1583	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1584
1585	/* L' */
1586	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1587	/* For RFC 4309, always use 4 bytes for message length
1588	 * (at most 2^32-1 bytes).
1589	 */
1590	areq_ctx->ctr_iv[0] = 3;
1591
1592	/* In RFC 4309 there is an 11-byte nonce+IV part
1593	 * that we build here.
1594	 */
1595	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1596	       CCM_BLOCK_NONCE_SIZE);
1597	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1598	       CCM_BLOCK_IV_SIZE);
1599	req->iv = areq_ctx->ctr_iv;
1600	req->assoclen -= CCM_BLOCK_IV_SIZE;
1601}
1602
1603static void cc_set_ghash_desc(struct aead_request *req,
1604			      struct cc_hw_desc desc[], unsigned int *seq_size)
1605{
1606	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1607	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1608	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1609	unsigned int idx = *seq_size;
1610
1611	/* load key to AES*/
1612	hw_desc_init(&desc[idx]);
1613	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1614	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1615	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1616		     ctx->enc_keylen, NS_BIT);
1617	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1618	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1619	set_flow_mode(&desc[idx], S_DIN_to_AES);
1620	idx++;
1621
1622	/* process one zero block to generate hkey */
1623	hw_desc_init(&desc[idx]);
1624	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1625	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1626		      NS_BIT, 0);
1627	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1628	idx++;
1629
1630	/* Memory Barrier */
1631	hw_desc_init(&desc[idx]);
1632	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1633	set_dout_no_dma(&desc[idx], 0, 0, 1);
1634	idx++;
1635
1636	/* Load GHASH subkey */
1637	hw_desc_init(&desc[idx]);
1638	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1639		     AES_BLOCK_SIZE, NS_BIT);
1640	set_dout_no_dma(&desc[idx], 0, 0, 1);
1641	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1642	set_aes_not_hash_mode(&desc[idx]);
1643	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1644	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1645	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1646	idx++;
1647
1648	/* Configure Hash Engine to work with GHASH.
1649	 * Since it was not possible to extend HASH submodes to add GHASH,
1650	 * the following command is necessary in order to
1651	 * select GHASH (according to the HW designers).
1652	 */
1653	hw_desc_init(&desc[idx]);
1654	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1655	set_dout_no_dma(&desc[idx], 0, 0, 1);
1656	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1657	set_aes_not_hash_mode(&desc[idx]);
1658	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1659	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1660	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1661	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1662	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1663	idx++;
1664
1665	/* Load GHASH initial STATE (which is 0). (for any hash there is an
1666	 * initial state)
1667	 */
1668	hw_desc_init(&desc[idx]);
1669	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1670	set_dout_no_dma(&desc[idx], 0, 0, 1);
1671	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1672	set_aes_not_hash_mode(&desc[idx]);
1673	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1674	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1675	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1676	idx++;
1677
1678	*seq_size = idx;
1679}
1680
1681static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1682			     unsigned int *seq_size)
1683{
1684	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1685	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1686	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1687	unsigned int idx = *seq_size;
1688
1689	/* load key to AES*/
1690	hw_desc_init(&desc[idx]);
1691	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1692	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1693	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1694		     ctx->enc_keylen, NS_BIT);
1695	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1696	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1697	set_flow_mode(&desc[idx], S_DIN_to_AES);
1698	idx++;
1699
1700	if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1701		/* Load the initial AES/CTR counter value incremented by 2 */
1702		hw_desc_init(&desc[idx]);
1703		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1704		set_key_size_aes(&desc[idx], ctx->enc_keylen);
1705		set_din_type(&desc[idx], DMA_DLLI,
1706			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1707			     NS_BIT);
1708		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1709		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1710		set_flow_mode(&desc[idx], S_DIN_to_AES);
1711		idx++;
1712	}
1713
1714	*seq_size = idx;
1715}
1716
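/*
 * Finish GCM: hash the length block, read back the GHASH value, then
 * encrypt it with the pre-computed counter block for counter value 1
 * (gcm_iv_inc1, i.e. J0) to produce the authentication tag.
 */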
1717static void cc_proc_gcm_result(struct aead_request *req,
1718			       struct cc_hw_desc desc[],
1719			       unsigned int *seq_size)
1720{
1721	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1722	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1723	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1724	dma_addr_t mac_result;
1725	unsigned int idx = *seq_size;
1726
1727	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1728		mac_result = req_ctx->mac_buf_dma_addr;
1729	} else { /* Encrypt */
1730		mac_result = req_ctx->icv_dma_addr;
1731	}
1732
1733	/* process(ghash) gcm_block_len */
1734	hw_desc_init(&desc[idx]);
1735	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1736		     AES_BLOCK_SIZE, NS_BIT);
1737	set_flow_mode(&desc[idx], DIN_HASH);
1738	idx++;
1739
1740	/* Store GHASH state after GHASH(Associated Data + Cipher + LenBlock) */
1741	hw_desc_init(&desc[idx]);
1742	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1743	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1744	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1745		      NS_BIT, 0);
1746	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1747	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1748	set_aes_not_hash_mode(&desc[idx]);
1749
1750	idx++;
1751
1752	/* Load the initial AES/CTR counter value incremented by 1 */
1753	hw_desc_init(&desc[idx]);
1754	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1755	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1756	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1757		     AES_BLOCK_SIZE, NS_BIT);
1758	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1759	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1760	set_flow_mode(&desc[idx], S_DIN_to_AES);
1761	idx++;
1762
1763	/* Memory Barrier */
1764	hw_desc_init(&desc[idx]);
1765	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1766	set_dout_no_dma(&desc[idx], 0, 0, 1);
1767	idx++;
1768
1769	/* process GCTR on stored GHASH and store MAC in mac_state*/
1770	hw_desc_init(&desc[idx]);
1771	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1772	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1773		     AES_BLOCK_SIZE, NS_BIT);
1774	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1775	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1776	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1777	idx++;
1778
1779	*seq_size = idx;
1780}
1781
1782static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1783		  unsigned int *seq_size)
1784{
1785	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1786	unsigned int cipher_flow_mode;
1787
1788	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1789		cipher_flow_mode = AES_and_HASH;
1790	} else { /* Encrypt */
1791		cipher_flow_mode = AES_to_HASH_and_DOUT;
1792	}
1793
1794	// In RFC 4543 there is no data to encrypt; just copy data from src to dest.
1795	if (req_ctx->plaintext_authenticate_only) {
1796		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1797		cc_set_ghash_desc(req, desc, seq_size);
1798		/* process(ghash) assoc data */
1799		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1800		cc_set_gctr_desc(req, desc, seq_size);
1801		cc_proc_gcm_result(req, desc, seq_size);
1802		return 0;
1803	}
1804
 
 
 
 
 
 
1805	// for gcm and rfc4106.
1806	cc_set_ghash_desc(req, desc, seq_size);
1807	/* process(ghash) assoc data */
1808	if (req->assoclen > 0)
1809		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1810	cc_set_gctr_desc(req, desc, seq_size);
1811	/* process(gctr+ghash) */
1812	if (req_ctx->cryptlen)
1813		cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1814	cc_proc_gcm_result(req, desc, seq_size);
1815
1816	return 0;
1817}
1818
1819static int config_gcm_context(struct aead_request *req)
1820{
1821	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1822	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1823	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1824	struct device *dev = drvdata_to_dev(ctx->drvdata);
1825
1826	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1827				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1828				req->cryptlen :
1829				(req->cryptlen - ctx->authsize);
1830	__be32 counter = cpu_to_be32(2);
1831
1832	dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
1833		__func__, cryptlen, req->assoclen, ctx->authsize);
1834
1835	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1836
1837	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1838
1839	memcpy(req->iv + 12, &counter, 4);
1840	memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1841
1842	counter = cpu_to_be32(1);
1843	memcpy(req->iv + 12, &counter, 4);
1844	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1845
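	/*
	 * Build the GHASH length block: 64-bit big-endian bit-lengths of the
	 * AAD and of the ciphertext. For RFC 4543 the whole payload
	 * (AAD + IV + plaintext) counts as AAD and the ciphertext length is
	 * zero.
	 */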
1846	if (!req_ctx->plaintext_authenticate_only) {
1847		__be64 temp64;
1848
1849		temp64 = cpu_to_be64(req->assoclen * 8);
1850		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1851		temp64 = cpu_to_be64(cryptlen * 8);
1852		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1853	} else {
1854		/* rfc4543 => all data (AAD, IV, plaintext) is considered additional
1855		 * data; that is, nothing is encrypted.
1856		 */
1857		__be64 temp64;
1858
1859		temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
1860				      cryptlen) * 8);
1861		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1862		temp64 = 0;
1863		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1864	}
1865
1866	return 0;
1867}
1868
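/*
 * RFC 4106/4543: rebuild the 12-byte GCM IV from the 4-byte nonce saved at
 * setkey time followed by the 8-byte per-request IV, and drop those 8 IV
 * bytes from assoclen since the caller counts them as associated data.
 */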
1869static void cc_proc_rfc4_gcm(struct aead_request *req)
1870{
1871	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1872	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1873	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1874
1875	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1876	       ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1877	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1878	       GCM_BLOCK_RFC4_IV_SIZE);
1879	req->iv = areq_ctx->ctr_iv;
1880	req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1881}
1882
1883static int cc_proc_aead(struct aead_request *req,
1884			enum drv_crypto_direction direct)
1885{
1886	int rc = 0;
1887	int seq_len = 0;
1888	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1889	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1890	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1891	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1892	struct device *dev = drvdata_to_dev(ctx->drvdata);
1893	struct cc_crypto_req cc_req = {};
1894
1895	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
1896		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1897		ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1898		sg_virt(req->dst), req->dst->offset, req->cryptlen);
1899
1900	/* STAT_PHASE_0: Init and sanity checks */
1901
1902	/* Check data length according to mode */
1903	if (validate_data_size(ctx, direct, req)) {
1904		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1905			req->cryptlen, req->assoclen);
1906		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1907		return -EINVAL;
1908	}
1909
1910	/* Setup request structure */
1911	cc_req.user_cb = (void *)cc_aead_complete;
1912	cc_req.user_arg = (void *)req;
1913
1914	/* Setup request context */
1915	areq_ctx->gen_ctx.op_type = direct;
1916	areq_ctx->req_authsize = ctx->authsize;
1917	areq_ctx->cipher_mode = ctx->cipher_mode;
1918
1919	/* STAT_PHASE_1: Map buffers */
1920
1921	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1922		/* Build CTR IV - Copy nonce from last 4 bytes in
1923		 * CTR key to first 4 bytes in CTR IV
1924		 */
1925		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1926		       CTR_RFC3686_NONCE_SIZE);
1927		if (!areq_ctx->backup_giv) /* User-provided (not generated) IV */
1928			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
1929			       req->iv, CTR_RFC3686_IV_SIZE);
1930		/* Initialize counter portion of counter block */
1931		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1932			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1933
1934		/* Replace with counter iv */
1935		req->iv = areq_ctx->ctr_iv;
1936		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1937	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1938		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1939		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1940		if (areq_ctx->ctr_iv != req->iv) {
1941			memcpy(areq_ctx->ctr_iv, req->iv,
1942			       crypto_aead_ivsize(tfm));
1943			req->iv = areq_ctx->ctr_iv;
1944		}
1945	}  else {
1946		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1947	}
1948
1949	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1950		rc = config_ccm_adata(req);
1951		if (rc) {
1952			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
1953				rc);
1954			goto exit;
1955		}
1956	} else {
1957		areq_ctx->ccm_hdr_size = ccm_header_size_null;
1958	}
1959
1960	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1961		rc = config_gcm_context(req);
1962		if (rc) {
1963			dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
1964				rc);
1965			goto exit;
1966		}
1967	}
1968
1969	rc = cc_map_aead_request(ctx->drvdata, req);
1970	if (rc) {
1971		dev_err(dev, "map_request() failed\n");
1972		goto exit;
1973	}
1974
1975	/* do we need to generate IV? */
1976	if (areq_ctx->backup_giv) {
1977		/* set the DMA mapped IV address*/
1978		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1979			cc_req.ivgen_dma_addr[0] =
1980				areq_ctx->gen_ctx.iv_dma_addr +
1981				CTR_RFC3686_NONCE_SIZE;
1982			cc_req.ivgen_dma_addr_len = 1;
1983		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1984			/* In ccm, the IV needs to exist both inside B0 and
1985			 * inside the counter. It is also copied to iv_dma_addr
1986			 * for other reasons (like returning it to the user).
1987			 * So, using 3 (identical) IV outputs.
1988			 */
1989			cc_req.ivgen_dma_addr[0] =
1990				areq_ctx->gen_ctx.iv_dma_addr +
1991				CCM_BLOCK_IV_OFFSET;
1992			cc_req.ivgen_dma_addr[1] =
1993				sg_dma_address(&areq_ctx->ccm_adata_sg) +
1994				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
1995			cc_req.ivgen_dma_addr[2] =
1996				sg_dma_address(&areq_ctx->ccm_adata_sg) +
1997				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
1998			cc_req.ivgen_dma_addr_len = 3;
1999		} else {
2000			cc_req.ivgen_dma_addr[0] =
2001				areq_ctx->gen_ctx.iv_dma_addr;
2002			cc_req.ivgen_dma_addr_len = 1;
2003		}
2004
2005		/* set the IV size (8/16 B long)*/
2006		cc_req.ivgen_size = crypto_aead_ivsize(tfm);
2007	}
2008
2009	/* STAT_PHASE_2: Create sequence */
2010
2011	/* Load MLLI tables to SRAM if necessary */
2012	cc_mlli_to_sram(req, desc, &seq_len);
2013
2014	/*TODO: move seq len by reference */
2015	switch (ctx->auth_mode) {
2016	case DRV_HASH_SHA1:
2017	case DRV_HASH_SHA256:
2018		cc_hmac_authenc(req, desc, &seq_len);
2019		break;
2020	case DRV_HASH_XCBC_MAC:
2021		cc_xcbc_authenc(req, desc, &seq_len);
2022		break;
2023	case DRV_HASH_NULL:
2024		if (ctx->cipher_mode == DRV_CIPHER_CCM)
2025			cc_ccm(req, desc, &seq_len);
2026		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2027			cc_gcm(req, desc, &seq_len);
2028		break;
2029	default:
2030		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2031		cc_unmap_aead_request(dev, req);
2032		rc = -ENOTSUPP;
2033		goto exit;
2034	}
2035
2036	/* STAT_PHASE_3: Lock HW and push sequence */
2037
2038	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2039
2040	if (rc != -EINPROGRESS && rc != -EBUSY) {
2041		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2042		cc_unmap_aead_request(dev, req);
2043	}
2044
2045exit:
2046	return rc;
2047}
2048
2049static int cc_aead_encrypt(struct aead_request *req)
2050{
2051	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2052	int rc;
2053
2054	/* No generated IV required */
2055	areq_ctx->backup_iv = req->iv;
2056	areq_ctx->backup_giv = NULL;
2057	areq_ctx->is_gcm4543 = false;
2058
2059	areq_ctx->plaintext_authenticate_only = false;
2060
2061	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2062	if (rc != -EINPROGRESS && rc != -EBUSY)
2063		req->iv = areq_ctx->backup_iv;
2064
2065	return rc;
2066}
2067
2068static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2069{
2070	/* Very similar to cc_aead_encrypt() above. */
2071
2072	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2073	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2074	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2075	struct device *dev = drvdata_to_dev(ctx->drvdata);
2076	int rc = -EINVAL;
2077
2078	if (!valid_assoclen(req)) {
2079		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2080		goto out;
2081	}
2082
2083	/* No generated IV required */
2084	areq_ctx->backup_iv = req->iv;
2085	areq_ctx->backup_giv = NULL;
2086	areq_ctx->is_gcm4543 = true;
2087
2088	cc_proc_rfc4309_ccm(req);
2089
2090	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2091	if (rc != -EINPROGRESS && rc != -EBUSY)
2092		req->iv = areq_ctx->backup_iv;
2093out:
2094	return rc;
2095}
2096
2097static int cc_aead_decrypt(struct aead_request *req)
2098{
2099	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2100	int rc;
2101
2102	/* No generated IV required */
2103	areq_ctx->backup_iv = req->iv;
2104	areq_ctx->backup_giv = NULL;
2105	areq_ctx->is_gcm4543 = false;
2106
2107	areq_ctx->plaintext_authenticate_only = false;
2108
2109	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2110	if (rc != -EINPROGRESS && rc != -EBUSY)
2111		req->iv = areq_ctx->backup_iv;
2112
2113	return rc;
2114}
2115
2116static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2117{
2118	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2119	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2120	struct device *dev = drvdata_to_dev(ctx->drvdata);
2121	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2122	int rc = -EINVAL;
2123
2124	if (!valid_assoclen(req)) {
2125		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2126		goto out;
2127	}
2128
2129	/* No generated IV required */
2130	areq_ctx->backup_iv = req->iv;
2131	areq_ctx->backup_giv = NULL;
2132
2133	areq_ctx->is_gcm4543 = true;
2134	cc_proc_rfc4309_ccm(req);
2135
2136	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2137	if (rc != -EINPROGRESS && rc != -EBUSY)
2138		req->iv = areq_ctx->backup_iv;
2139
2140out:
2141	return rc;
2142}
2143
2144static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2145				 unsigned int keylen)
2146{
2147	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2148	struct device *dev = drvdata_to_dev(ctx->drvdata);
2149
2150	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2151
2152	if (keylen < 4)
2153		return -EINVAL;
2154
2155	keylen -= 4;
2156	memcpy(ctx->ctr_nonce, key + keylen, 4);
2157
2158	return cc_aead_setkey(tfm, key, keylen);
2159}
2160
2161static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2162				 unsigned int keylen)
2163{
2164	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2165	struct device *dev = drvdata_to_dev(ctx->drvdata);
2166
2167	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2168
2169	if (keylen < 4)
2170		return -EINVAL;
2171
2172	keylen -= 4;
2173	memcpy(ctx->ctr_nonce, key + keylen, 4);
2174
2175	return cc_aead_setkey(tfm, key, keylen);
2176}
2177
2178static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2179			      unsigned int authsize)
2180{
2181	switch (authsize) {
2182	case 4:
2183	case 8:
2184	case 12:
2185	case 13:
2186	case 14:
2187	case 15:
2188	case 16:
2189		break;
2190	default:
2191		return -EINVAL;
2192	}
2193
2194	return cc_aead_setauthsize(authenc, authsize);
2195}
2196
2197static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2198				      unsigned int authsize)
2199{
2200	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2201	struct device *dev = drvdata_to_dev(ctx->drvdata);
2202
2203	dev_dbg(dev, "authsize %d\n", authsize);
2204
2205	switch (authsize) {
2206	case 8:
2207	case 12:
2208	case 16:
2209		break;
2210	default:
2211		return -EINVAL;
2212	}
2213
2214	return cc_aead_setauthsize(authenc, authsize);
2215}
2216
2217static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2218				      unsigned int authsize)
2219{
2220	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2221	struct device *dev = drvdata_to_dev(ctx->drvdata);
2222
2223	dev_dbg(dev, "authsize %d\n", authsize);
2224
2225	if (authsize != 16)
2226		return -EINVAL;
2227
2228	return cc_aead_setauthsize(authenc, authsize);
2229}
2230
2231static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2232{
2233	/* Very similar to cc_aead_encrypt() above. */
2234
2235	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2236	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2237	struct device *dev = drvdata_to_dev(ctx->drvdata);
2238	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2239	int rc = -EINVAL;
2240
2241	if (!valid_assoclen(req)) {
2242		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2243		goto out;
2244	}
2245
2246	/* No generated IV required */
2247	areq_ctx->backup_iv = req->iv;
2248	areq_ctx->backup_giv = NULL;
2249
2250	areq_ctx->plaintext_authenticate_only = false;
2251
2252	cc_proc_rfc4_gcm(req);
2253	areq_ctx->is_gcm4543 = true;
2254
2255	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2256	if (rc != -EINPROGRESS && rc != -EBUSY)
2257		req->iv = areq_ctx->backup_iv;
2258out:
2259	return rc;
2260}
2261
2262static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2263{
2264	/* Very similar to cc_aead_encrypt() above. */
2265
2266	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2267	int rc;
2268
2269	// Plaintext is not encrypted with rfc4543
2270	areq_ctx->plaintext_authenticate_only = true;
2271
2272	/* No generated IV required */
2273	areq_ctx->backup_iv = req->iv;
2274	areq_ctx->backup_giv = NULL;
2275
2276	cc_proc_rfc4_gcm(req);
2277	areq_ctx->is_gcm4543 = true;
2278
2279	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2280	if (rc != -EINPROGRESS && rc != -EBUSY)
2281		req->iv = areq_ctx->backup_iv;
2282
2283	return rc;
2284}
2285
2286static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2287{
2288	/* Very similar to cc_aead_decrypt() above. */
2289
2290	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2291	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2292	struct device *dev = drvdata_to_dev(ctx->drvdata);
2293	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2294	int rc = -EINVAL;
2295
2296	if (!valid_assoclen(req)) {
2297		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2298		goto out;
2299	}
2300
2301	/* No generated IV required */
2302	areq_ctx->backup_iv = req->iv;
2303	areq_ctx->backup_giv = NULL;
2304
2305	areq_ctx->plaintext_authenticate_only = false;
2306
2307	cc_proc_rfc4_gcm(req);
2308	areq_ctx->is_gcm4543 = true;
2309
2310	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2311	if (rc != -EINPROGRESS && rc != -EBUSY)
2312		req->iv = areq_ctx->backup_iv;
2313out:
2314	return rc;
2315}
2316
2317static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2318{
2319	/* Very similar to cc_aead_decrypt() above. */
2320
2321	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2322	int rc;
2323
2324	// Ciphertext is not decrypted with rfc4543
2325	areq_ctx->plaintext_authenticate_only = true;
2326
2327	/* No generated IV required */
2328	areq_ctx->backup_iv = req->iv;
2329	areq_ctx->backup_giv = NULL;
2330
2331	cc_proc_rfc4_gcm(req);
2332	areq_ctx->is_gcm4543 = true;
2333
2334	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2335	if (rc != -EINPROGRESS && rc != -EBUSY)
2336		req->iv = areq_ctx->backup_iv;
2337
2338	return rc;
2339}
2340
2341/* aead alg */
2342static struct cc_alg_template aead_algs[] = {
2343	{
2344		.name = "authenc(hmac(sha1),cbc(aes))",
2345		.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2346		.blocksize = AES_BLOCK_SIZE,
2347		.type = CRYPTO_ALG_TYPE_AEAD,
2348		.template_aead = {
2349			.setkey = cc_aead_setkey,
2350			.setauthsize = cc_aead_setauthsize,
2351			.encrypt = cc_aead_encrypt,
2352			.decrypt = cc_aead_decrypt,
2353			.init = cc_aead_init,
2354			.exit = cc_aead_exit,
2355			.ivsize = AES_BLOCK_SIZE,
2356			.maxauthsize = SHA1_DIGEST_SIZE,
2357		},
2358		.cipher_mode = DRV_CIPHER_CBC,
2359		.flow_mode = S_DIN_to_AES,
2360		.auth_mode = DRV_HASH_SHA1,
2361		.min_hw_rev = CC_HW_REV_630,
2362	},
2363	{
2364		.name = "authenc(hmac(sha1),cbc(des3_ede))",
2365		.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2366		.blocksize = DES3_EDE_BLOCK_SIZE,
2367		.type = CRYPTO_ALG_TYPE_AEAD,
2368		.template_aead = {
2369			.setkey = cc_aead_setkey,
2370			.setauthsize = cc_aead_setauthsize,
2371			.encrypt = cc_aead_encrypt,
2372			.decrypt = cc_aead_decrypt,
2373			.init = cc_aead_init,
2374			.exit = cc_aead_exit,
2375			.ivsize = DES3_EDE_BLOCK_SIZE,
2376			.maxauthsize = SHA1_DIGEST_SIZE,
2377		},
2378		.cipher_mode = DRV_CIPHER_CBC,
2379		.flow_mode = S_DIN_to_DES,
2380		.auth_mode = DRV_HASH_SHA1,
2381		.min_hw_rev = CC_HW_REV_630,
2382	},
2383	{
2384		.name = "authenc(hmac(sha256),cbc(aes))",
2385		.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2386		.blocksize = AES_BLOCK_SIZE,
2387		.type = CRYPTO_ALG_TYPE_AEAD,
2388		.template_aead = {
2389			.setkey = cc_aead_setkey,
2390			.setauthsize = cc_aead_setauthsize,
2391			.encrypt = cc_aead_encrypt,
2392			.decrypt = cc_aead_decrypt,
2393			.init = cc_aead_init,
2394			.exit = cc_aead_exit,
2395			.ivsize = AES_BLOCK_SIZE,
2396			.maxauthsize = SHA256_DIGEST_SIZE,
2397		},
2398		.cipher_mode = DRV_CIPHER_CBC,
2399		.flow_mode = S_DIN_to_AES,
2400		.auth_mode = DRV_HASH_SHA256,
2401		.min_hw_rev = CC_HW_REV_630,
2402	},
2403	{
2404		.name = "authenc(hmac(sha256),cbc(des3_ede))",
2405		.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2406		.blocksize = DES3_EDE_BLOCK_SIZE,
2407		.type = CRYPTO_ALG_TYPE_AEAD,
2408		.template_aead = {
2409			.setkey = cc_aead_setkey,
2410			.setauthsize = cc_aead_setauthsize,
2411			.encrypt = cc_aead_encrypt,
2412			.decrypt = cc_aead_decrypt,
2413			.init = cc_aead_init,
2414			.exit = cc_aead_exit,
2415			.ivsize = DES3_EDE_BLOCK_SIZE,
2416			.maxauthsize = SHA256_DIGEST_SIZE,
2417		},
2418		.cipher_mode = DRV_CIPHER_CBC,
2419		.flow_mode = S_DIN_to_DES,
2420		.auth_mode = DRV_HASH_SHA256,
2421		.min_hw_rev = CC_HW_REV_630,
2422	},
2423	{
2424		.name = "authenc(xcbc(aes),cbc(aes))",
2425		.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2426		.blocksize = AES_BLOCK_SIZE,
2427		.type = CRYPTO_ALG_TYPE_AEAD,
2428		.template_aead = {
2429			.setkey = cc_aead_setkey,
2430			.setauthsize = cc_aead_setauthsize,
2431			.encrypt = cc_aead_encrypt,
2432			.decrypt = cc_aead_decrypt,
2433			.init = cc_aead_init,
2434			.exit = cc_aead_exit,
2435			.ivsize = AES_BLOCK_SIZE,
2436			.maxauthsize = AES_BLOCK_SIZE,
2437		},
2438		.cipher_mode = DRV_CIPHER_CBC,
2439		.flow_mode = S_DIN_to_AES,
2440		.auth_mode = DRV_HASH_XCBC_MAC,
2441		.min_hw_rev = CC_HW_REV_630,
2442	},
2443	{
2444		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2445		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2446		.blocksize = 1,
2447		.type = CRYPTO_ALG_TYPE_AEAD,
2448		.template_aead = {
2449			.setkey = cc_aead_setkey,
2450			.setauthsize = cc_aead_setauthsize,
2451			.encrypt = cc_aead_encrypt,
2452			.decrypt = cc_aead_decrypt,
2453			.init = cc_aead_init,
2454			.exit = cc_aead_exit,
2455			.ivsize = CTR_RFC3686_IV_SIZE,
2456			.maxauthsize = SHA1_DIGEST_SIZE,
2457		},
2458		.cipher_mode = DRV_CIPHER_CTR,
2459		.flow_mode = S_DIN_to_AES,
2460		.auth_mode = DRV_HASH_SHA1,
2461		.min_hw_rev = CC_HW_REV_630,
2462	},
2463	{
2464		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2465		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2466		.blocksize = 1,
2467		.type = CRYPTO_ALG_TYPE_AEAD,
2468		.template_aead = {
2469			.setkey = cc_aead_setkey,
2470			.setauthsize = cc_aead_setauthsize,
2471			.encrypt = cc_aead_encrypt,
2472			.decrypt = cc_aead_decrypt,
2473			.init = cc_aead_init,
2474			.exit = cc_aead_exit,
2475			.ivsize = CTR_RFC3686_IV_SIZE,
2476			.maxauthsize = SHA256_DIGEST_SIZE,
2477		},
2478		.cipher_mode = DRV_CIPHER_CTR,
2479		.flow_mode = S_DIN_to_AES,
2480		.auth_mode = DRV_HASH_SHA256,
2481		.min_hw_rev = CC_HW_REV_630,
2482	},
2483	{
2484		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2485		.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2486		.blocksize = 1,
2487		.type = CRYPTO_ALG_TYPE_AEAD,
2488		.template_aead = {
2489			.setkey = cc_aead_setkey,
2490			.setauthsize = cc_aead_setauthsize,
2491			.encrypt = cc_aead_encrypt,
2492			.decrypt = cc_aead_decrypt,
2493			.init = cc_aead_init,
2494			.exit = cc_aead_exit,
2495			.ivsize = CTR_RFC3686_IV_SIZE,
2496			.maxauthsize = AES_BLOCK_SIZE,
2497		},
2498		.cipher_mode = DRV_CIPHER_CTR,
2499		.flow_mode = S_DIN_to_AES,
2500		.auth_mode = DRV_HASH_XCBC_MAC,
2501		.min_hw_rev = CC_HW_REV_630,
2502	},
2503	{
2504		.name = "ccm(aes)",
2505		.driver_name = "ccm-aes-ccree",
2506		.blocksize = 1,
2507		.type = CRYPTO_ALG_TYPE_AEAD,
2508		.template_aead = {
2509			.setkey = cc_aead_setkey,
2510			.setauthsize = cc_ccm_setauthsize,
2511			.encrypt = cc_aead_encrypt,
2512			.decrypt = cc_aead_decrypt,
2513			.init = cc_aead_init,
2514			.exit = cc_aead_exit,
2515			.ivsize = AES_BLOCK_SIZE,
2516			.maxauthsize = AES_BLOCK_SIZE,
2517		},
2518		.cipher_mode = DRV_CIPHER_CCM,
2519		.flow_mode = S_DIN_to_AES,
2520		.auth_mode = DRV_HASH_NULL,
2521		.min_hw_rev = CC_HW_REV_630,
2522	},
2523	{
2524		.name = "rfc4309(ccm(aes))",
2525		.driver_name = "rfc4309-ccm-aes-ccree",
2526		.blocksize = 1,
2527		.type = CRYPTO_ALG_TYPE_AEAD,
2528		.template_aead = {
2529			.setkey = cc_rfc4309_ccm_setkey,
2530			.setauthsize = cc_rfc4309_ccm_setauthsize,
2531			.encrypt = cc_rfc4309_ccm_encrypt,
2532			.decrypt = cc_rfc4309_ccm_decrypt,
2533			.init = cc_aead_init,
2534			.exit = cc_aead_exit,
2535			.ivsize = CCM_BLOCK_IV_SIZE,
2536			.maxauthsize = AES_BLOCK_SIZE,
2537		},
2538		.cipher_mode = DRV_CIPHER_CCM,
2539		.flow_mode = S_DIN_to_AES,
2540		.auth_mode = DRV_HASH_NULL,
2541		.min_hw_rev = CC_HW_REV_630,
2542	},
2543	{
2544		.name = "gcm(aes)",
2545		.driver_name = "gcm-aes-ccree",
2546		.blocksize = 1,
2547		.type = CRYPTO_ALG_TYPE_AEAD,
2548		.template_aead = {
2549			.setkey = cc_aead_setkey,
2550			.setauthsize = cc_gcm_setauthsize,
2551			.encrypt = cc_aead_encrypt,
2552			.decrypt = cc_aead_decrypt,
2553			.init = cc_aead_init,
2554			.exit = cc_aead_exit,
2555			.ivsize = GCM_AES_IV_SIZE,
2556			.maxauthsize = AES_BLOCK_SIZE,
2557		},
2558		.cipher_mode = DRV_CIPHER_GCTR,
2559		.flow_mode = S_DIN_to_AES,
2560		.auth_mode = DRV_HASH_NULL,
2561		.min_hw_rev = CC_HW_REV_630,
2562	},
2563	{
2564		.name = "rfc4106(gcm(aes))",
2565		.driver_name = "rfc4106-gcm-aes-ccree",
2566		.blocksize = 1,
2567		.type = CRYPTO_ALG_TYPE_AEAD,
2568		.template_aead = {
2569			.setkey = cc_rfc4106_gcm_setkey,
2570			.setauthsize = cc_rfc4106_gcm_setauthsize,
2571			.encrypt = cc_rfc4106_gcm_encrypt,
2572			.decrypt = cc_rfc4106_gcm_decrypt,
2573			.init = cc_aead_init,
2574			.exit = cc_aead_exit,
2575			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2576			.maxauthsize = AES_BLOCK_SIZE,
2577		},
2578		.cipher_mode = DRV_CIPHER_GCTR,
2579		.flow_mode = S_DIN_to_AES,
2580		.auth_mode = DRV_HASH_NULL,
2581		.min_hw_rev = CC_HW_REV_630,
2582	},
2583	{
2584		.name = "rfc4543(gcm(aes))",
2585		.driver_name = "rfc4543-gcm-aes-ccree",
2586		.blocksize = 1,
2587		.type = CRYPTO_ALG_TYPE_AEAD,
2588		.template_aead = {
2589			.setkey = cc_rfc4543_gcm_setkey,
2590			.setauthsize = cc_rfc4543_gcm_setauthsize,
2591			.encrypt = cc_rfc4543_gcm_encrypt,
2592			.decrypt = cc_rfc4543_gcm_decrypt,
2593			.init = cc_aead_init,
2594			.exit = cc_aead_exit,
2595			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2596			.maxauthsize = AES_BLOCK_SIZE,
2597		},
2598		.cipher_mode = DRV_CIPHER_GCTR,
2599		.flow_mode = S_DIN_to_AES,
2600		.auth_mode = DRV_HASH_NULL,
2601		.min_hw_rev = CC_HW_REV_630,
2602	},
2603};
2604
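/*
 * Editor's illustrative sketch (not part of the upstream driver): once the
 * templates above are registered, an in-kernel consumer reaches this hardware
 * through the generic AEAD API purely by algorithm name; the crypto core
 * selects the ccree implementation when its cra_priority wins.  Everything
 * below (the function name, its parameters and the fixed 16-byte tag) is
 * assumed for illustration only, and error unwinding is trimmed.  The needed
 * declarations come in via <crypto/aead.h>, already pulled in by the includes
 * at the top of this file.
 */
static int cc_example_gcm_encrypt(const u8 *key, unsigned int keylen,
				  struct scatterlist *sg, unsigned int assoclen,
				  unsigned int ptextlen, u8 iv[12])
{
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	struct aead_request *req;
	int rc;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_aead_setkey(tfm, key, keylen);
	if (!rc)
		rc = crypto_aead_setauthsize(tfm, 16);
	if (rc)
		goto free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto free_tfm;
	}

	/*
	 * sg holds assoclen bytes of AAD followed by ptextlen bytes of
	 * plaintext, with room for the 16-byte tag appended on encryption
	 * (processed in place here).
	 */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, sg, sg, ptextlen, iv);

	rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
free_tfm:
	crypto_free_aead(tfm);
	return rc;
}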
2605static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2606						struct device *dev)
2607{
2608	struct cc_crypto_alg *t_alg;
2609	struct aead_alg *alg;
2610
2611	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2612	if (!t_alg)
2613		return ERR_PTR(-ENOMEM);
2614
2615	alg = &tmpl->template_aead;
2616
2617	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
2618	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2619		 tmpl->driver_name);
2620	alg->base.cra_module = THIS_MODULE;
2621	alg->base.cra_priority = CC_CRA_PRIO;
2622
2623	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
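	/*
	 * Every ccree AEAD completes asynchronously through the HW request
	 * queue and is only reachable through this kernel driver, hence the
	 * ASYNC and KERN_DRIVER_ONLY flags below.
	 */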
2624	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
2625			 tmpl->type;
2626	alg->init = cc_aead_init;
2627	alg->exit = cc_aead_exit;
2628
2629	t_alg->aead_alg = *alg;
2630
2631	t_alg->cipher_mode = tmpl->cipher_mode;
2632	t_alg->flow_mode = tmpl->flow_mode;
2633	t_alg->auth_mode = tmpl->auth_mode;
2634
2635	return t_alg;
2636}
2637
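/*
 * Tear down everything cc_aead_alloc() set up: unregister each AEAD that was
 * added to aead_list, free its wrapper, then release the per-driver handle.
 */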
2638int cc_aead_free(struct cc_drvdata *drvdata)
2639{
2640	struct cc_crypto_alg *t_alg, *n;
2641	struct cc_aead_handle *aead_handle =
2642		(struct cc_aead_handle *)drvdata->aead_handle;
2643
2644	if (aead_handle) {
2645		/* Remove registered algs */
2646		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
2647					 entry) {
2648			crypto_unregister_aead(&t_alg->aead_alg);
2649			list_del(&t_alg->entry);
2650			kfree(t_alg);
2651		}
2652		kfree(aead_handle);
2653		drvdata->aead_handle = NULL;
2654	}
2655
2656	return 0;
2657}
2658
2659int cc_aead_alloc(struct cc_drvdata *drvdata)
2660{
2661	struct cc_aead_handle *aead_handle;
2662	struct cc_crypto_alg *t_alg;
2663	int rc = -ENOMEM;
2664	int alg;
2665	struct device *dev = drvdata_to_dev(drvdata);
2666
2667	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
2668	if (!aead_handle) {
2669		rc = -ENOMEM;
2670		goto fail0;
2671	}
2672
2673	INIT_LIST_HEAD(&aead_handle->aead_list);
2674	drvdata->aead_handle = aead_handle;
2675
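	/*
	 * Reserve a small scratch area in CryptoCell SRAM (one maximal HMAC
	 * digest) that the AEAD descriptor flows use as workspace for
	 * intermediate digest values.
	 */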
2676	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2677							 MAX_HMAC_DIGEST_SIZE);
2678
2679	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2680		dev_err(dev, "SRAM pool exhausted\n");
2681		rc = -ENOMEM;
2682		goto fail1;
2683	}
2684
2685	/* Register the AEAD algorithms supported by this HW revision */
2686	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2687		if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
2688			continue;
2689
2690		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2691		if (IS_ERR(t_alg)) {
2692			rc = PTR_ERR(t_alg);
2693			dev_err(dev, "%s alg allocation failed\n",
2694				aead_algs[alg].driver_name);
2695			goto fail1;
2696		}
2697		t_alg->drvdata = drvdata;
2698		rc = crypto_register_aead(&t_alg->aead_alg);
2699		if (rc) {
2700			dev_err(dev, "%s alg registration failed\n",
2701				t_alg->aead_alg.base.cra_driver_name);
2702			goto fail2;
2703		} else {
2704			list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2705			dev_dbg(dev, "Registered %s\n",
2706				t_alg->aead_alg.base.cra_driver_name);
2707		}
2708	}
2709
2710	return 0;
2711
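/*
 * fail2 frees only the alg whose registration just failed; everything already
 * registered (and the handle itself) is unwound by cc_aead_free() at fail1.
 */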
2712fail2:
2713	kfree(t_alg);
2714fail1:
2715	cc_aead_free(drvdata);
2716fail0:
2717	return rc;
2718}
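/*
 * Hedged sketch of how the core driver is expected to consume these entry
 * points: cc_aead_alloc() runs once per device during probe, after the SRAM
 * pool and request manager are up, and cc_aead_free() runs on the probe error
 * path and at remove time.  The helper name below is illustrative only; the
 * real call sites live in cc_driver.c.
 */
static int cc_example_wire_aead(struct cc_drvdata *drvdata)
{
	int rc;

	rc = cc_aead_alloc(drvdata);
	if (rc)
		return rc;

	/* ... continue registering the remaining algorithm families ... */

	return 0;
}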