v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <crypto/algapi.h>
   7#include <crypto/internal/aead.h>
   8#include <crypto/authenc.h>
   9#include <crypto/gcm.h>
  10#include <linux/rtnetlink.h>
  11#include <crypto/internal/des.h>
  12#include "cc_driver.h"
  13#include "cc_buffer_mgr.h"
  14#include "cc_aead.h"
  15#include "cc_request_mgr.h"
  16#include "cc_hash.h"
  17#include "cc_sram_mgr.h"
  18
  19#define template_aead	template_u.aead
  20
  21#define MAX_AEAD_SETKEY_SEQ 12
  22#define MAX_AEAD_PROCESS_SEQ 23
  23
  24#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
  25#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
  26
  27#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
  28
  29struct cc_aead_handle {
  30	u32 sram_workspace_addr;
  31	struct list_head aead_list;
  32};
  33
  34struct cc_hmac_s {
  35	u8 *padded_authkey;
  36	u8 *ipad_opad; /* IPAD, OPAD*/
  37	dma_addr_t padded_authkey_dma_addr;
  38	dma_addr_t ipad_opad_dma_addr;
  39};
  40
  41struct cc_xcbc_s {
  42	u8 *xcbc_keys; /* K1,K2,K3 */
  43	dma_addr_t xcbc_keys_dma_addr;
  44};
  45
  46struct cc_aead_ctx {
  47	struct cc_drvdata *drvdata;
  48	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
  49	u8 *enckey;
  50	dma_addr_t enckey_dma_addr;
  51	union {
  52		struct cc_hmac_s hmac;
  53		struct cc_xcbc_s xcbc;
  54	} auth_state;
  55	unsigned int enc_keylen;
  56	unsigned int auth_keylen;
   57	unsigned int authsize; /* Actual (possibly reduced) size of the MAC/ICV */
  58	unsigned int hash_len;
  59	enum drv_cipher_mode cipher_mode;
  60	enum cc_flow_mode flow_mode;
  61	enum drv_hash_mode auth_mode;
  62};
  63
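/*
 * cc_aead_exit() - release per-transform state: the DMA-coherent enckey
 * buffer and, depending on the authentication mode, either the three
 * derived XCBC keys or the HMAC ipad/opad digests and the padded
 * authentication key.
 */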
  64static void cc_aead_exit(struct crypto_aead *tfm)
  65{
  66	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  67	struct device *dev = drvdata_to_dev(ctx->drvdata);
  68
  69	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
  70		crypto_tfm_alg_name(&tfm->base));
  71
  72	/* Unmap enckey buffer */
  73	if (ctx->enckey) {
  74		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
  75				  ctx->enckey_dma_addr);
  76		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
  77			&ctx->enckey_dma_addr);
  78		ctx->enckey_dma_addr = 0;
  79		ctx->enckey = NULL;
  80	}
  81
   82	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
  83		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
  84
  85		if (xcbc->xcbc_keys) {
  86			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
  87					  xcbc->xcbc_keys,
  88					  xcbc->xcbc_keys_dma_addr);
  89		}
  90		dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
  91			&xcbc->xcbc_keys_dma_addr);
  92		xcbc->xcbc_keys_dma_addr = 0;
  93		xcbc->xcbc_keys = NULL;
  94	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
  95		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
  96
  97		if (hmac->ipad_opad) {
  98			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
  99					  hmac->ipad_opad,
 100					  hmac->ipad_opad_dma_addr);
 101			dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
 102				&hmac->ipad_opad_dma_addr);
 103			hmac->ipad_opad_dma_addr = 0;
 104			hmac->ipad_opad = NULL;
 105		}
 106		if (hmac->padded_authkey) {
 107			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
 108					  hmac->padded_authkey,
 109					  hmac->padded_authkey_dma_addr);
 110			dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
 111				&hmac->padded_authkey_dma_addr);
 112			hmac->padded_authkey_dma_addr = 0;
 113			hmac->padded_authkey = NULL;
 114		}
 115	}
 116}
 117
 118static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
 119{
 120	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 121
 122	return cc_get_default_hash_len(ctx->drvdata);
 123}
 124
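/*
 * cc_aead_init() - one-time transform setup: allocate the DMA-coherent
 * buffers that live for the lifetime of the tfm (the cipher key buffer
 * and, per auth mode, either the XCBC derived-key buffer or the HMAC
 * ipad/opad and padded-authkey buffers). Any allocation failure unwinds
 * through cc_aead_exit().
 */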
 125static int cc_aead_init(struct crypto_aead *tfm)
 126{
 127	struct aead_alg *alg = crypto_aead_alg(tfm);
 128	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 129	struct cc_crypto_alg *cc_alg =
 130			container_of(alg, struct cc_crypto_alg, aead_alg);
 131	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
 132
 133	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
 134		crypto_tfm_alg_name(&tfm->base));
 135
 136	/* Initialize modes in instance */
 137	ctx->cipher_mode = cc_alg->cipher_mode;
 138	ctx->flow_mode = cc_alg->flow_mode;
 139	ctx->auth_mode = cc_alg->auth_mode;
 140	ctx->drvdata = cc_alg->drvdata;
 141	crypto_aead_set_reqsize_dma(tfm, sizeof(struct aead_req_ctx));
 142
 143	/* Allocate key buffer, cache line aligned */
 144	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
 145					 &ctx->enckey_dma_addr, GFP_KERNEL);
 146	if (!ctx->enckey) {
 147		dev_err(dev, "Failed allocating key buffer\n");
 148		goto init_failed;
 149	}
 150	dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
 151		ctx->enckey);
 152
 153	/* Set default authlen value */
 154
  155	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
 156		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
 157		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
 158
 159		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
 160		/* (and temporary for user key - up to 256b) */
 161		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
 162						     &xcbc->xcbc_keys_dma_addr,
 163						     GFP_KERNEL);
 164		if (!xcbc->xcbc_keys) {
 165			dev_err(dev, "Failed allocating buffer for XCBC keys\n");
 166			goto init_failed;
 167		}
 168	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
 169		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
 170		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
 171		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
 172
 173		/* Allocate dma-coherent buffer for IPAD + OPAD */
 174		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
 175						     &hmac->ipad_opad_dma_addr,
 176						     GFP_KERNEL);
 177
 178		if (!hmac->ipad_opad) {
 179			dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
 180			goto init_failed;
 181		}
 182
 183		dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
 184			hmac->ipad_opad);
 185
 186		hmac->padded_authkey = dma_alloc_coherent(dev,
 187							  MAX_HMAC_BLOCK_SIZE,
 188							  pkey_dma,
 189							  GFP_KERNEL);
 190
 191		if (!hmac->padded_authkey) {
 192			dev_err(dev, "failed to allocate padded_authkey\n");
 193			goto init_failed;
 194		}
 195	} else {
 196		ctx->auth_state.hmac.ipad_opad = NULL;
 197		ctx->auth_state.hmac.padded_authkey = NULL;
 198	}
 199	ctx->hash_len = cc_get_aead_hash_len(tfm);
 200
 201	return 0;
 202
 203init_failed:
 204	cc_aead_exit(tfm);
 205	return -ENOMEM;
 206}
 207
 208static void cc_aead_complete(struct device *dev, void *cc_req, int err)
 209{
 210	struct aead_request *areq = (struct aead_request *)cc_req;
 211	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
 212	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
 213	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 214
 215	/* BACKLOG notification */
 216	if (err == -EINPROGRESS)
 217		goto done;
 218
 219	cc_unmap_aead_request(dev, areq);
 220
 221	/* Restore ordinary iv pointer */
 222	areq->iv = areq_ctx->backup_iv;
 223
 224	if (err)
 225		goto done;
 226
 227	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
 228		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
 229			   ctx->authsize) != 0) {
 230			dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
 231				ctx->authsize, ctx->cipher_mode);
  232			/* In case of payload authentication failure, the decrypted
  233			 * message MUST NOT be revealed --> zero its memory.
  234			 */
 235			sg_zero_buffer(areq->dst, sg_nents(areq->dst),
 236				       areq->cryptlen, areq->assoclen);
 237			err = -EBADMSG;
 238		}
 239	/*ENCRYPT*/
 240	} else if (areq_ctx->is_icv_fragmented) {
 241		u32 skip = areq->cryptlen + areq_ctx->dst_offset;
 242
 243		cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
 244				   skip, (skip + ctx->authsize),
 245				   CC_SG_FROM_BUF);
 246	}
 247done:
 248	aead_request_complete(areq, err);
 249}
 250
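/*
 * xcbc_setkey() - build the AES-XCBC-MAC key-derivation sequence.
 * Per RFC 3566, K1, K2 and K3 are derived by encrypting the constants
 * 0x01..01, 0x02..02 and 0x03..03 with the user key; the user key is
 * loaded from the same DMA buffer that receives the derived keys since
 * it is no longer needed afterwards. Returns the number of descriptors
 * used (4).
 */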
 251static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
 252				struct cc_aead_ctx *ctx)
 253{
 254	/* Load the AES key */
 255	hw_desc_init(&desc[0]);
  256	/* We use the same buffer for the source/user key as for the
  257	 * output keys, because after this key load it is not
  258	 * needed anymore
  259	 */
 260	set_din_type(&desc[0], DMA_DLLI,
 261		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
 262		     NS_BIT);
 263	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
 264	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
 265	set_key_size_aes(&desc[0], ctx->auth_keylen);
 266	set_flow_mode(&desc[0], S_DIN_to_AES);
 267	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
 268
 269	hw_desc_init(&desc[1]);
 270	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
 271	set_flow_mode(&desc[1], DIN_AES_DOUT);
 272	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
 273		      AES_KEYSIZE_128, NS_BIT, 0);
 274
 275	hw_desc_init(&desc[2]);
 276	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
 277	set_flow_mode(&desc[2], DIN_AES_DOUT);
 278	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
 279					 + AES_KEYSIZE_128),
 280			      AES_KEYSIZE_128, NS_BIT, 0);
 281
 282	hw_desc_init(&desc[3]);
 283	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
 284	set_flow_mode(&desc[3], DIN_AES_DOUT);
 285	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
 286					  + 2 * AES_KEYSIZE_128),
 287			      AES_KEYSIZE_128, NS_BIT, 0);
 288
 289	return 4;
 290}
 291
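/*
 * hmac_setkey() - build the HMAC ipad/opad precomputation sequence.
 * For each of the two HMAC pad constants: load the larval digest, XOR
 * the padded authentication key with the constant, hash one block and
 * write the intermediate digest into the ipad_opad buffer. Returns the
 * number of descriptors used.
 */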
 292static unsigned int hmac_setkey(struct cc_hw_desc *desc,
 293				struct cc_aead_ctx *ctx)
 294{
 295	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 296	unsigned int digest_ofs = 0;
 297	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 298			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 299	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 300			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
 301	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
 302
 303	unsigned int idx = 0;
 304	int i;
 305
 306	/* calc derived HMAC key */
 307	for (i = 0; i < 2; i++) {
 308		/* Load hash initial state */
 309		hw_desc_init(&desc[idx]);
 310		set_cipher_mode(&desc[idx], hash_mode);
 311		set_din_sram(&desc[idx],
 312			     cc_larval_digest_addr(ctx->drvdata,
 313						   ctx->auth_mode),
 314			     digest_size);
 315		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 316		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 317		idx++;
 318
 319		/* Load the hash current length*/
 320		hw_desc_init(&desc[idx]);
 321		set_cipher_mode(&desc[idx], hash_mode);
 322		set_din_const(&desc[idx], 0, ctx->hash_len);
 323		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 324		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 325		idx++;
 326
  327		/* Prepare ipad/opad key */
 328		hw_desc_init(&desc[idx]);
 329		set_xor_val(&desc[idx], hmac_pad_const[i]);
 330		set_cipher_mode(&desc[idx], hash_mode);
 331		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 332		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 333		idx++;
 334
 335		/* Perform HASH update */
 336		hw_desc_init(&desc[idx]);
 337		set_din_type(&desc[idx], DMA_DLLI,
 338			     hmac->padded_authkey_dma_addr,
 339			     SHA256_BLOCK_SIZE, NS_BIT);
 340		set_cipher_mode(&desc[idx], hash_mode);
 341		set_xor_active(&desc[idx]);
 342		set_flow_mode(&desc[idx], DIN_HASH);
 343		idx++;
 344
  345		/* Get the digest */
 346		hw_desc_init(&desc[idx]);
 347		set_cipher_mode(&desc[idx], hash_mode);
 348		set_dout_dlli(&desc[idx],
 349			      (hmac->ipad_opad_dma_addr + digest_ofs),
 350			      digest_size, NS_BIT, 0);
 351		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 352		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 353		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 354		idx++;
 355
 356		digest_ofs += digest_size;
 357	}
 358
 359	return idx;
 360}
 361
 362static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 363{
 364	struct device *dev = drvdata_to_dev(ctx->drvdata);
 365
 366	dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
 367		ctx->enc_keylen, ctx->auth_keylen);
 368
 369	switch (ctx->auth_mode) {
 370	case DRV_HASH_SHA1:
 371	case DRV_HASH_SHA256:
 372		break;
 373	case DRV_HASH_XCBC_MAC:
 374		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
 375		    ctx->auth_keylen != AES_KEYSIZE_192 &&
 376		    ctx->auth_keylen != AES_KEYSIZE_256)
 377			return -ENOTSUPP;
 378		break;
  379	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
 380		if (ctx->auth_keylen > 0)
 381			return -EINVAL;
 382		break;
 383	default:
 384		dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
 385		return -EINVAL;
 386	}
 387	/* Check cipher key size */
 388	if (ctx->flow_mode == S_DIN_to_DES) {
 389		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
 390			dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n",
 391				ctx->enc_keylen);
 392			return -EINVAL;
 393		}
 394	} else { /* Default assumed to be AES ciphers */
 395		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
 396		    ctx->enc_keylen != AES_KEYSIZE_192 &&
 397		    ctx->enc_keylen != AES_KEYSIZE_256) {
 398			dev_dbg(dev, "Invalid cipher(AES) key size: %u\n",
 399				ctx->enc_keylen);
 400			return -EINVAL;
 401		}
 402	}
 403
 404	return 0; /* All tests of keys sizes passed */
 405}
 406
  407/* This function prepares the user key so it can be passed to the HMAC
  408 * processing (copy to internal buffer, or hash it if longer than the block size)
  409 */
 410static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
 411				 unsigned int keylen)
 412{
 413	dma_addr_t key_dma_addr = 0;
 414	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 415	struct device *dev = drvdata_to_dev(ctx->drvdata);
 416	u32 larval_addr;
 417	struct cc_crypto_req cc_req = {};
 418	unsigned int blocksize;
 419	unsigned int digestsize;
 420	unsigned int hashmode;
 421	unsigned int idx = 0;
 422	int rc = 0;
 423	u8 *key = NULL;
 424	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 425	dma_addr_t padded_authkey_dma_addr =
 426		ctx->auth_state.hmac.padded_authkey_dma_addr;
 427
 428	switch (ctx->auth_mode) { /* auth_key required and >0 */
 429	case DRV_HASH_SHA1:
 430		blocksize = SHA1_BLOCK_SIZE;
 431		digestsize = SHA1_DIGEST_SIZE;
 432		hashmode = DRV_HASH_HW_SHA1;
 433		break;
 434	case DRV_HASH_SHA256:
 435	default:
 436		blocksize = SHA256_BLOCK_SIZE;
 437		digestsize = SHA256_DIGEST_SIZE;
 438		hashmode = DRV_HASH_HW_SHA256;
 439	}
 440
 441	if (keylen != 0) {
 442
 443		key = kmemdup(authkey, keylen, GFP_KERNEL);
 444		if (!key)
 445			return -ENOMEM;
 446
 447		key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
 448		if (dma_mapping_error(dev, key_dma_addr)) {
 449			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 450				key, keylen);
 451			kfree_sensitive(key);
 452			return -ENOMEM;
 453		}
 454		if (keylen > blocksize) {
 455			/* Load hash initial state */
 456			hw_desc_init(&desc[idx]);
 457			set_cipher_mode(&desc[idx], hashmode);
 458			larval_addr = cc_larval_digest_addr(ctx->drvdata,
 459							    ctx->auth_mode);
 460			set_din_sram(&desc[idx], larval_addr, digestsize);
 461			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 462			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 463			idx++;
 464
 465			/* Load the hash current length*/
 466			hw_desc_init(&desc[idx]);
 467			set_cipher_mode(&desc[idx], hashmode);
 468			set_din_const(&desc[idx], 0, ctx->hash_len);
 469			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 470			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 471			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 472			idx++;
 473
 474			hw_desc_init(&desc[idx]);
 475			set_din_type(&desc[idx], DMA_DLLI,
 476				     key_dma_addr, keylen, NS_BIT);
 477			set_flow_mode(&desc[idx], DIN_HASH);
 478			idx++;
 479
 480			/* Get hashed key */
 481			hw_desc_init(&desc[idx]);
 482			set_cipher_mode(&desc[idx], hashmode);
 483			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 484				      digestsize, NS_BIT, 0);
 485			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 486			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 487			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 488			set_cipher_config0(&desc[idx],
 489					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 490			idx++;
 491
 492			hw_desc_init(&desc[idx]);
 493			set_din_const(&desc[idx], 0, (blocksize - digestsize));
 494			set_flow_mode(&desc[idx], BYPASS);
 495			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
 496				      digestsize), (blocksize - digestsize),
 497				      NS_BIT, 0);
 498			idx++;
 499		} else {
 500			hw_desc_init(&desc[idx]);
 501			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
 502				     keylen, NS_BIT);
 503			set_flow_mode(&desc[idx], BYPASS);
 504			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 505				      keylen, NS_BIT, 0);
 506			idx++;
 507
 508			if ((blocksize - keylen) != 0) {
 509				hw_desc_init(&desc[idx]);
 510				set_din_const(&desc[idx], 0,
 511					      (blocksize - keylen));
 512				set_flow_mode(&desc[idx], BYPASS);
 513				set_dout_dlli(&desc[idx],
 514					      (padded_authkey_dma_addr +
 515					       keylen),
 516					      (blocksize - keylen), NS_BIT, 0);
 517				idx++;
 518			}
 519		}
 520	} else {
 521		hw_desc_init(&desc[idx]);
 522		set_din_const(&desc[idx], 0, (blocksize - keylen));
 523		set_flow_mode(&desc[idx], BYPASS);
 524		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 525			      blocksize, NS_BIT, 0);
 526		idx++;
 527	}
 528
 529	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 530	if (rc)
 531		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 532
 533	if (key_dma_addr)
 534		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 535
 536	kfree_sensitive(key);
 537
 538	return rc;
 539}
 540
 541static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 542			  unsigned int keylen)
 543{
 544	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 545	struct cc_crypto_req cc_req = {};
 546	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 547	unsigned int seq_len = 0;
 548	struct device *dev = drvdata_to_dev(ctx->drvdata);
 549	const u8 *enckey, *authkey;
 550	int rc;
 551
 552	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
 553		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
 554
 555	/* STAT_PHASE_0: Init and sanity checks */
 556
 557	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
 558		struct crypto_authenc_keys keys;
 559
 560		rc = crypto_authenc_extractkeys(&keys, key, keylen);
 561		if (rc)
 562			return rc;
 563		enckey = keys.enckey;
 564		authkey = keys.authkey;
 565		ctx->enc_keylen = keys.enckeylen;
 566		ctx->auth_keylen = keys.authkeylen;
 567
 568		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
  569			/* the nonce is stored in the last bytes of the key */
 570			if (ctx->enc_keylen <
 571			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
 572				return -EINVAL;
 573			/* Copy nonce from last 4 bytes in CTR key to
 574			 *  first 4 bytes in CTR IV
 575			 */
 576			memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
 577			       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
 578			/* Set CTR key size */
 579			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 580		}
 581	} else { /* non-authenc - has just one key */
 582		enckey = key;
 583		authkey = NULL;
 584		ctx->enc_keylen = keylen;
 585		ctx->auth_keylen = 0;
 586	}
 587
 588	rc = validate_keys_sizes(ctx);
 589	if (rc)
 590		return rc;
 591
 592	/* STAT_PHASE_1: Copy key to ctx */
 593
 594	/* Get key material */
 595	memcpy(ctx->enckey, enckey, ctx->enc_keylen);
 596	if (ctx->enc_keylen == 24)
 597		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
 598	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 599		memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
 600		       ctx->auth_keylen);
 601	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
 602		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
 603		if (rc)
 604			return rc;
 605	}
 606
 607	/* STAT_PHASE_2: Create sequence */
 608
 609	switch (ctx->auth_mode) {
 610	case DRV_HASH_SHA1:
 611	case DRV_HASH_SHA256:
 612		seq_len = hmac_setkey(desc, ctx);
 613		break;
 614	case DRV_HASH_XCBC_MAC:
 615		seq_len = xcbc_setkey(desc, ctx);
 616		break;
 617	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
 618		break; /* No auth. key setup */
 619	default:
 620		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
 621		return -ENOTSUPP;
 622	}
 623
 624	/* STAT_PHASE_3: Submit sequence to HW */
 625
 626	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
 627		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
 628		if (rc) {
 629			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 630			return rc;
 631		}
 632	}
 633
 634	/* Update STAT_PHASE_3 */
 635	return rc;
 636}
 637
 638static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
 639			       unsigned int keylen)
 640{
 641	struct crypto_authenc_keys keys;
 642	int err;
 643
 644	err = crypto_authenc_extractkeys(&keys, key, keylen);
 645	if (unlikely(err))
 646		return err;
 647
 648	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
 649	      cc_aead_setkey(aead, key, keylen);
 650
 651	memzero_explicit(&keys, sizeof(keys));
 652	return err;
 653}
 654
 655static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
 656				 unsigned int keylen)
 657{
 658	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 659
 660	if (keylen < 3)
 661		return -EINVAL;
 662
 663	keylen -= 3;
 664	memcpy(ctx->ctr_nonce, key + keylen, 3);
 665
 666	return cc_aead_setkey(tfm, key, keylen);
 667}
 668
 669static int cc_aead_setauthsize(struct crypto_aead *authenc,
 670			       unsigned int authsize)
 671{
 672	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
 673	struct device *dev = drvdata_to_dev(ctx->drvdata);
 674
 675	/* Unsupported auth. sizes */
 676	if (authsize == 0 ||
 677	    authsize > crypto_aead_maxauthsize(authenc)) {
 678		return -ENOTSUPP;
 679	}
 680
 681	ctx->authsize = authsize;
 682	dev_dbg(dev, "authlen=%d\n", ctx->authsize);
 683
 684	return 0;
 685}
 686
 687static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
 688				      unsigned int authsize)
 689{
 690	switch (authsize) {
 691	case 8:
 692	case 12:
 693	case 16:
 694		break;
 695	default:
 696		return -EINVAL;
 697	}
 698
 699	return cc_aead_setauthsize(authenc, authsize);
 700}
 701
 702static int cc_ccm_setauthsize(struct crypto_aead *authenc,
 703			      unsigned int authsize)
 704{
 705	switch (authsize) {
 706	case 4:
 707	case 6:
 708	case 8:
 709	case 10:
 710	case 12:
 711	case 14:
 712	case 16:
 713		break;
 714	default:
 715		return -EINVAL;
 716	}
 717
 718	return cc_aead_setauthsize(authenc, authsize);
 719}
 720
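/*
 * Illustrative usage sketch (not part of this driver): the setkey and
 * setauthsize entry points above are reached through the generic kernel
 * AEAD API. A caller would do roughly the following; buffer names and
 * error handling are hypothetical and omitted for brevity.
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *req;
 *	struct scatterlist sg;
 *
 *	tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);     // authenc()-format key blob
 *	crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, assoclen + cryptlen + authsize);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);
 *	crypto_aead_encrypt(req);
 */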
 721static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
 722			      struct cc_hw_desc desc[], unsigned int *seq_size)
 723{
 724	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 725	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 726	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
 727	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
 728	unsigned int idx = *seq_size;
 729	struct device *dev = drvdata_to_dev(ctx->drvdata);
 730
 731	switch (assoc_dma_type) {
 732	case CC_DMA_BUF_DLLI:
 733		dev_dbg(dev, "ASSOC buffer type DLLI\n");
 734		hw_desc_init(&desc[idx]);
 735		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
 736			     areq_ctx->assoclen, NS_BIT);
 737		set_flow_mode(&desc[idx], flow_mode);
 738		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
 739		    areq_ctx->cryptlen > 0)
 740			set_din_not_last_indication(&desc[idx]);
 741		break;
 742	case CC_DMA_BUF_MLLI:
 743		dev_dbg(dev, "ASSOC buffer type MLLI\n");
 744		hw_desc_init(&desc[idx]);
 745		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
 746			     areq_ctx->assoc.mlli_nents, NS_BIT);
 747		set_flow_mode(&desc[idx], flow_mode);
 748		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
 749		    areq_ctx->cryptlen > 0)
 750			set_din_not_last_indication(&desc[idx]);
 751		break;
 752	case CC_DMA_BUF_NULL:
 753	default:
 754		dev_err(dev, "Invalid ASSOC buffer type\n");
 755	}
 756
 757	*seq_size = (++idx);
 758}
 759
 760static void cc_proc_authen_desc(struct aead_request *areq,
 761				unsigned int flow_mode,
 762				struct cc_hw_desc desc[],
 763				unsigned int *seq_size, int direct)
 764{
 765	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
 766	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
 767	unsigned int idx = *seq_size;
 768	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 769	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 770	struct device *dev = drvdata_to_dev(ctx->drvdata);
 771
 772	switch (data_dma_type) {
 773	case CC_DMA_BUF_DLLI:
 774	{
 775		struct scatterlist *cipher =
 776			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 777			areq_ctx->dst_sgl : areq_ctx->src_sgl;
 778
 779		unsigned int offset =
 780			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 781			areq_ctx->dst_offset : areq_ctx->src_offset;
 782		dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
 783		hw_desc_init(&desc[idx]);
 784		set_din_type(&desc[idx], DMA_DLLI,
 785			     (sg_dma_address(cipher) + offset),
 786			     areq_ctx->cryptlen, NS_BIT);
 787		set_flow_mode(&desc[idx], flow_mode);
 788		break;
 789	}
 790	case CC_DMA_BUF_MLLI:
 791	{
  792		/* DOUBLE-PASS flow (the default):
  793		 * assoc. + iv + data are compacted into one table;
  794		 * if assoclen is ZERO, only the IV is processed
  795		 */
 796		u32 mlli_addr = areq_ctx->assoc.sram_addr;
 797		u32 mlli_nents = areq_ctx->assoc.mlli_nents;
 798
 799		if (areq_ctx->is_single_pass) {
 800			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 801				mlli_addr = areq_ctx->dst.sram_addr;
 802				mlli_nents = areq_ctx->dst.mlli_nents;
 803			} else {
 804				mlli_addr = areq_ctx->src.sram_addr;
 805				mlli_nents = areq_ctx->src.mlli_nents;
 806			}
 807		}
 808
 809		dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
 810		hw_desc_init(&desc[idx]);
 811		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
 812			     NS_BIT);
 813		set_flow_mode(&desc[idx], flow_mode);
 814		break;
 815	}
 816	case CC_DMA_BUF_NULL:
 817	default:
 818		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
 819	}
 820
 821	*seq_size = (++idx);
 822}
 823
 824static void cc_proc_cipher_desc(struct aead_request *areq,
 825				unsigned int flow_mode,
 826				struct cc_hw_desc desc[],
 827				unsigned int *seq_size)
 828{
 829	unsigned int idx = *seq_size;
 830	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq);
 831	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
 832	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 833	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 834	struct device *dev = drvdata_to_dev(ctx->drvdata);
 835
 836	if (areq_ctx->cryptlen == 0)
 837		return; /*null processing*/
 838
 839	switch (data_dma_type) {
 840	case CC_DMA_BUF_DLLI:
 841		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
 842		hw_desc_init(&desc[idx]);
 843		set_din_type(&desc[idx], DMA_DLLI,
 844			     (sg_dma_address(areq_ctx->src_sgl) +
 845			      areq_ctx->src_offset), areq_ctx->cryptlen,
 846			      NS_BIT);
 847		set_dout_dlli(&desc[idx],
 848			      (sg_dma_address(areq_ctx->dst_sgl) +
 849			       areq_ctx->dst_offset),
 850			      areq_ctx->cryptlen, NS_BIT, 0);
 851		set_flow_mode(&desc[idx], flow_mode);
 852		break;
 853	case CC_DMA_BUF_MLLI:
 854		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
 855		hw_desc_init(&desc[idx]);
 856		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
 857			     areq_ctx->src.mlli_nents, NS_BIT);
 858		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
 859			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
 860		set_flow_mode(&desc[idx], flow_mode);
 861		break;
 862	case CC_DMA_BUF_NULL:
 863	default:
 864		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
 865	}
 866
 867	*seq_size = (++idx);
 868}
 869
 870static void cc_proc_digest_desc(struct aead_request *req,
 871				struct cc_hw_desc desc[],
 872				unsigned int *seq_size)
 873{
 874	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 875	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 876	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
 877	unsigned int idx = *seq_size;
 878	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 879				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 880	int direct = req_ctx->gen_ctx.op_type;
 881
 882	/* Get final ICV result */
 883	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 884		hw_desc_init(&desc[idx]);
 885		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 886		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 887		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
 888			      NS_BIT, 1);
 889		set_queue_last_ind(ctx->drvdata, &desc[idx]);
 890		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 891			set_aes_not_hash_mode(&desc[idx]);
 892			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
 893		} else {
 894			set_cipher_config0(&desc[idx],
 895					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 896			set_cipher_mode(&desc[idx], hash_mode);
 897		}
 898	} else { /*Decrypt*/
 899		/* Get ICV out from hardware */
 900		hw_desc_init(&desc[idx]);
 901		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 902		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 903		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
 904			      ctx->authsize, NS_BIT, 1);
 905		set_queue_last_ind(ctx->drvdata, &desc[idx]);
 906		set_cipher_config0(&desc[idx],
 907				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 908		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 909		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 910			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
 911			set_aes_not_hash_mode(&desc[idx]);
 912		} else {
 913			set_cipher_mode(&desc[idx], hash_mode);
 914		}
 915	}
 916
 917	*seq_size = (++idx);
 918}
 919
 920static void cc_set_cipher_desc(struct aead_request *req,
 921			       struct cc_hw_desc desc[],
 922			       unsigned int *seq_size)
 923{
 924	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 925	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 926	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
 927	unsigned int hw_iv_size = req_ctx->hw_iv_size;
 928	unsigned int idx = *seq_size;
 929	int direct = req_ctx->gen_ctx.op_type;
 930
 931	/* Setup cipher state */
 932	hw_desc_init(&desc[idx]);
 933	set_cipher_config0(&desc[idx], direct);
 934	set_flow_mode(&desc[idx], ctx->flow_mode);
 935	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
 936		     hw_iv_size, NS_BIT);
 937	if (ctx->cipher_mode == DRV_CIPHER_CTR)
 938		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 939	else
 940		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 941	set_cipher_mode(&desc[idx], ctx->cipher_mode);
 942	idx++;
 943
 944	/* Setup enc. key */
 945	hw_desc_init(&desc[idx]);
 946	set_cipher_config0(&desc[idx], direct);
 947	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 948	set_flow_mode(&desc[idx], ctx->flow_mode);
 949	if (ctx->flow_mode == S_DIN_to_AES) {
 950		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
 951			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
 952			      ctx->enc_keylen), NS_BIT);
 953		set_key_size_aes(&desc[idx], ctx->enc_keylen);
 954	} else {
 955		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
 956			     ctx->enc_keylen, NS_BIT);
 957		set_key_size_des(&desc[idx], ctx->enc_keylen);
 958	}
 959	set_cipher_mode(&desc[idx], ctx->cipher_mode);
 960	idx++;
 961
 962	*seq_size = idx;
 963}
 964
 965static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
 966			   unsigned int *seq_size, unsigned int data_flow_mode)
 967{
 968	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
 969	int direct = req_ctx->gen_ctx.op_type;
 970	unsigned int idx = *seq_size;
 971
 972	if (req_ctx->cryptlen == 0)
 973		return; /*null processing*/
 974
 975	cc_set_cipher_desc(req, desc, &idx);
 976	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
 977	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
  978		/* We must wait for the DMA to write all of the cipher data */
 979		hw_desc_init(&desc[idx]);
 980		set_din_no_dma(&desc[idx], 0, 0xfffff0);
 981		set_dout_no_dma(&desc[idx], 0, 0, 1);
 982		idx++;
 983	}
 984
 985	*seq_size = idx;
 986}
 987
 988static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
 989			     unsigned int *seq_size)
 990{
 991	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 992	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 993	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 994				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 995	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 996				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
 997	unsigned int idx = *seq_size;
 998
 999	/* Loading hash ipad xor key state */
1000	hw_desc_init(&desc[idx]);
1001	set_cipher_mode(&desc[idx], hash_mode);
1002	set_din_type(&desc[idx], DMA_DLLI,
1003		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
1004		     NS_BIT);
1005	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1006	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1007	idx++;
1008
1009	/* Load init. digest len (64 bytes) */
1010	hw_desc_init(&desc[idx]);
1011	set_cipher_mode(&desc[idx], hash_mode);
1012	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1013		     ctx->hash_len);
1014	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1015	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1016	idx++;
1017
1018	*seq_size = idx;
1019}
1020
1021static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
1022			     unsigned int *seq_size)
1023{
1024	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1025	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1026	unsigned int idx = *seq_size;
1027
1028	/* Loading MAC state */
1029	hw_desc_init(&desc[idx]);
1030	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1031	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1032	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1033	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1034	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1035	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1036	set_aes_not_hash_mode(&desc[idx]);
1037	idx++;
1038
1039	/* Setup XCBC MAC K1 */
1040	hw_desc_init(&desc[idx]);
1041	set_din_type(&desc[idx], DMA_DLLI,
1042		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1043		     AES_KEYSIZE_128, NS_BIT);
1044	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1045	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1046	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1047	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1048	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1049	set_aes_not_hash_mode(&desc[idx]);
1050	idx++;
1051
1052	/* Setup XCBC MAC K2 */
1053	hw_desc_init(&desc[idx]);
1054	set_din_type(&desc[idx], DMA_DLLI,
1055		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1056		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1057	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1058	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1059	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1060	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1061	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1062	set_aes_not_hash_mode(&desc[idx]);
1063	idx++;
1064
1065	/* Setup XCBC MAC K3 */
1066	hw_desc_init(&desc[idx]);
1067	set_din_type(&desc[idx], DMA_DLLI,
1068		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1069		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1070	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
1071	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1072	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1073	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1074	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1075	set_aes_not_hash_mode(&desc[idx]);
1076	idx++;
1077
1078	*seq_size = idx;
1079}
1080
1081static void cc_proc_header_desc(struct aead_request *req,
1082				struct cc_hw_desc desc[],
1083				unsigned int *seq_size)
1084{
1085	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
1086	unsigned int idx = *seq_size;
1087
1088	/* Hash associated data */
1089	if (areq_ctx->assoclen > 0)
1090		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1091
1092	/* Hash IV */
1093	*seq_size = idx;
1094}
1095
1096static void cc_proc_scheme_desc(struct aead_request *req,
1097				struct cc_hw_desc desc[],
1098				unsigned int *seq_size)
1099{
1100	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1101	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1102	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
1103	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1104				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1105	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1106				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1107	unsigned int idx = *seq_size;
1108
1109	hw_desc_init(&desc[idx]);
1110	set_cipher_mode(&desc[idx], hash_mode);
1111	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1112		      ctx->hash_len);
1113	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1114	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
1115	set_cipher_do(&desc[idx], DO_PAD);
1116	idx++;
1117
1118	/* Get final ICV result */
1119	hw_desc_init(&desc[idx]);
1120	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1121		      digest_size);
1122	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1123	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1124	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1125	set_cipher_mode(&desc[idx], hash_mode);
1126	idx++;
1127
1128	/* Loading hash opad xor key state */
1129	hw_desc_init(&desc[idx]);
1130	set_cipher_mode(&desc[idx], hash_mode);
1131	set_din_type(&desc[idx], DMA_DLLI,
1132		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1133		     digest_size, NS_BIT);
1134	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1135	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1136	idx++;
1137
1138	/* Load init. digest len (64 bytes) */
1139	hw_desc_init(&desc[idx]);
1140	set_cipher_mode(&desc[idx], hash_mode);
1141	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1142		     ctx->hash_len);
1143	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1144	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1145	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1146	idx++;
1147
1148	/* Perform HASH update */
1149	hw_desc_init(&desc[idx]);
1150	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
1151		     digest_size);
1152	set_flow_mode(&desc[idx], DIN_HASH);
1153	idx++;
1154
1155	*seq_size = idx;
1156}
1157
1158static void cc_mlli_to_sram(struct aead_request *req,
1159			    struct cc_hw_desc desc[], unsigned int *seq_size)
1160{
1161	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1162	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1163	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1164	struct device *dev = drvdata_to_dev(ctx->drvdata);
1165
1166	if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1167	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
1168	    !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
1169		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1170			ctx->drvdata->mlli_sram_addr,
1171			req_ctx->mlli_params.mlli_len);
1172		/* Copy MLLI table host-to-sram */
1173		hw_desc_init(&desc[*seq_size]);
1174		set_din_type(&desc[*seq_size], DMA_DLLI,
1175			     req_ctx->mlli_params.mlli_dma_addr,
1176			     req_ctx->mlli_params.mlli_len, NS_BIT);
1177		set_dout_sram(&desc[*seq_size],
1178			      ctx->drvdata->mlli_sram_addr,
1179			      req_ctx->mlli_params.mlli_len);
1180		set_flow_mode(&desc[*seq_size], BYPASS);
1181		(*seq_size)++;
1182	}
1183}
1184
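/*
 * cc_get_data_flow() - pick the HW data flow for the payload pass. For
 * encryption, single-pass feeds the cipher output straight into the hash
 * engine (AES/DES_to_HASH_and_DOUT); for decryption, the ciphertext is
 * hashed in parallel with decryption (AES/DES_and_HASH). Double-pass
 * falls back to plain cipher-only flows, since authentication is handled
 * in a separate pass.
 */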
1185static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
1186					  enum cc_flow_mode setup_flow_mode,
1187					  bool is_single_pass)
1188{
1189	enum cc_flow_mode data_flow_mode;
1190
1191	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1192		if (setup_flow_mode == S_DIN_to_AES)
1193			data_flow_mode = is_single_pass ?
1194				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1195		else
1196			data_flow_mode = is_single_pass ?
1197				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1198	} else { /* Decrypt */
1199		if (setup_flow_mode == S_DIN_to_AES)
1200			data_flow_mode = is_single_pass ?
1201				AES_and_HASH : DIN_AES_DOUT;
1202		else
1203			data_flow_mode = is_single_pass ?
1204				DES_and_HASH : DIN_DES_DOUT;
1205	}
1206
1207	return data_flow_mode;
1208}
1209
1210static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1211			    unsigned int *seq_size)
1212{
1213	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1214	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1215	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1216	int direct = req_ctx->gen_ctx.op_type;
1217	unsigned int data_flow_mode =
1218		cc_get_data_flow(direct, ctx->flow_mode,
1219				 req_ctx->is_single_pass);
1220
1221	if (req_ctx->is_single_pass) {
1222		/*
1223		 * Single-pass flow
1224		 */
1225		cc_set_hmac_desc(req, desc, seq_size);
1226		cc_set_cipher_desc(req, desc, seq_size);
1227		cc_proc_header_desc(req, desc, seq_size);
1228		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1229		cc_proc_scheme_desc(req, desc, seq_size);
1230		cc_proc_digest_desc(req, desc, seq_size);
1231		return;
1232	}
1233
1234	/*
1235	 * Double-pass flow
1236	 * Fallback for unsupported single-pass modes,
 1237	 * i.e. assoc. data whose length is not a word multiple
1238	 */
1239	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1240		/* encrypt first.. */
1241		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1242		/* authenc after..*/
1243		cc_set_hmac_desc(req, desc, seq_size);
1244		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1245		cc_proc_scheme_desc(req, desc, seq_size);
1246		cc_proc_digest_desc(req, desc, seq_size);
1247
1248	} else { /*DECRYPT*/
1249		/* authenc first..*/
1250		cc_set_hmac_desc(req, desc, seq_size);
1251		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1252		cc_proc_scheme_desc(req, desc, seq_size);
1253		/* decrypt after.. */
1254		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
 1255		/* reading the digest result, with the completion bit set,
 1256		 * must come after the cipher operation
 1257		 */
1258		cc_proc_digest_desc(req, desc, seq_size);
1259	}
1260}
1261
1262static void
1263cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1264		unsigned int *seq_size)
1265{
1266	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1267	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1268	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1269	int direct = req_ctx->gen_ctx.op_type;
1270	unsigned int data_flow_mode =
1271		cc_get_data_flow(direct, ctx->flow_mode,
1272				 req_ctx->is_single_pass);
1273
1274	if (req_ctx->is_single_pass) {
1275		/*
1276		 * Single-pass flow
1277		 */
1278		cc_set_xcbc_desc(req, desc, seq_size);
1279		cc_set_cipher_desc(req, desc, seq_size);
1280		cc_proc_header_desc(req, desc, seq_size);
1281		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1282		cc_proc_digest_desc(req, desc, seq_size);
1283		return;
1284	}
1285
1286	/*
1287	 * Double-pass flow
1288	 * Fallback for unsupported single-pass modes,
 1289	 * i.e. assoc. data whose length is not a word multiple
1290	 */
1291	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1292		/* encrypt first.. */
1293		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1294		/* authenc after.. */
1295		cc_set_xcbc_desc(req, desc, seq_size);
1296		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1297		cc_proc_digest_desc(req, desc, seq_size);
1298	} else { /*DECRYPT*/
1299		/* authenc first.. */
1300		cc_set_xcbc_desc(req, desc, seq_size);
1301		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1302		/* decrypt after..*/
1303		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
 1304		/* reading the digest result, with the completion bit set,
 1305		 * must come after the cipher operation
 1306		 */
1307		cc_proc_digest_desc(req, desc, seq_size);
1308	}
1309}
1310
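/*
 * validate_data_size() - sanity-check lengths and choose the flow:
 * reject payloads that violate the cipher's block-size requirements and
 * clear is_single_pass whenever the associated data (or, for CTR, the
 * payload) length is not a multiple of a 32-bit word, forcing the
 * double-pass fallback.
 */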
1311static int validate_data_size(struct cc_aead_ctx *ctx,
1312			      enum drv_crypto_direction direct,
1313			      struct aead_request *req)
1314{
1315	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
1316	struct device *dev = drvdata_to_dev(ctx->drvdata);
1317	unsigned int assoclen = areq_ctx->assoclen;
1318	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1319			(req->cryptlen - ctx->authsize) : req->cryptlen;
1320
1321	if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1322	    req->cryptlen < ctx->authsize)
1323		goto data_size_err;
1324
1325	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1326
1327	switch (ctx->flow_mode) {
1328	case S_DIN_to_AES:
1329		if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1330		    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1331			goto data_size_err;
1332		if (ctx->cipher_mode == DRV_CIPHER_CCM)
1333			break;
1334		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1335			if (areq_ctx->plaintext_authenticate_only)
1336				areq_ctx->is_single_pass = false;
1337			break;
1338		}
1339
1340		if (!IS_ALIGNED(assoclen, sizeof(u32)))
1341			areq_ctx->is_single_pass = false;
1342
1343		if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1344		    !IS_ALIGNED(cipherlen, sizeof(u32)))
1345			areq_ctx->is_single_pass = false;
1346
1347		break;
1348	case S_DIN_to_DES:
1349		if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1350			goto data_size_err;
1351		if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1352			areq_ctx->is_single_pass = false;
1353		break;
1354	default:
1355		dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1356		goto data_size_err;
1357	}
1358
1359	return 0;
1360
1361data_size_err:
1362	return -EINVAL;
1363}
1364
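/*
 * format_ccm_a0() - encode the CCM associated-data length field per
 * RFC 3610 section 2.2: lengths below 2^16 - 2^8 use a 2-byte encoding,
 * larger lengths use the 0xFF 0xFE marker followed by a 4-byte
 * big-endian length. Returns the number of bytes written.
 */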
1365static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1366{
1367	unsigned int len = 0;
1368
1369	if (header_size == 0)
1370		return 0;
1371
1372	if (header_size < ((1UL << 16) - (1UL << 8))) {
1373		len = 2;
1374
1375		pa0_buff[0] = (header_size >> 8) & 0xFF;
1376		pa0_buff[1] = header_size & 0xFF;
1377	} else {
1378		len = 6;
1379
1380		pa0_buff[0] = 0xFF;
1381		pa0_buff[1] = 0xFE;
1382		pa0_buff[2] = (header_size >> 24) & 0xFF;
1383		pa0_buff[3] = (header_size >> 16) & 0xFF;
1384		pa0_buff[4] = (header_size >> 8) & 0xFF;
1385		pa0_buff[5] = header_size & 0xFF;
1386	}
1387
1388	return len;
1389}
1390
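/*
 * set_msg_len() - write the big-endian message length l(m) into the
 * trailing csize bytes of the B0 block (mirrors the helper of the same
 * name in crypto/ccm.c). Fails with -EOVERFLOW if the length does not
 * fit in csize bytes.
 */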
1391static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1392{
1393	__be32 data;
1394
1395	memset(block, 0, csize);
1396	block += csize;
1397
1398	if (csize >= 4)
1399		csize = 4;
1400	else if (msglen > (1 << (8 * csize)))
1401		return -EOVERFLOW;
1402
1403	data = cpu_to_be32(msglen);
1404	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1405
1406	return 0;
1407}
1408
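/*
 * cc_ccm() - build the descriptor sequence for one AES-CCM request: set
 * up AES-CTR for the payload, set up CBC-MAC with the same key and a
 * zeroed MAC state, hash the formatted header/associated data, process
 * the payload, then read back the CBC-MAC value and encrypt it with the
 * A0 counter block to produce the tag; on decryption the result is
 * checked against the received ICV in the completion callback.
 */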
1409static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1410		  unsigned int *seq_size)
1411{
1412	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1413	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1414	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1415	unsigned int idx = *seq_size;
1416	unsigned int cipher_flow_mode;
1417	dma_addr_t mac_result;
1418
1419	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1420		cipher_flow_mode = AES_to_HASH_and_DOUT;
1421		mac_result = req_ctx->mac_buf_dma_addr;
1422	} else { /* Encrypt */
1423		cipher_flow_mode = AES_and_HASH;
1424		mac_result = req_ctx->icv_dma_addr;
1425	}
1426
1427	/* load key */
1428	hw_desc_init(&desc[idx]);
1429	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1430	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1431		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1432		      ctx->enc_keylen), NS_BIT);
1433	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1434	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1435	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1436	set_flow_mode(&desc[idx], S_DIN_to_AES);
1437	idx++;
1438
1439	/* load ctr state */
1440	hw_desc_init(&desc[idx]);
1441	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1442	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1443	set_din_type(&desc[idx], DMA_DLLI,
1444		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1445	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1446	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1447	set_flow_mode(&desc[idx], S_DIN_to_AES);
1448	idx++;
1449
1450	/* load MAC key */
1451	hw_desc_init(&desc[idx]);
1452	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1453	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1454		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1455		      ctx->enc_keylen), NS_BIT);
1456	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1457	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1458	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1459	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1460	set_aes_not_hash_mode(&desc[idx]);
1461	idx++;
1462
1463	/* load MAC state */
1464	hw_desc_init(&desc[idx]);
1465	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1466	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1467	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1468		     AES_BLOCK_SIZE, NS_BIT);
1469	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1470	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1471	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1472	set_aes_not_hash_mode(&desc[idx]);
1473	idx++;
1474
1475	/* process assoc data */
1476	if (req_ctx->assoclen > 0) {
1477		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1478	} else {
1479		hw_desc_init(&desc[idx]);
1480		set_din_type(&desc[idx], DMA_DLLI,
1481			     sg_dma_address(&req_ctx->ccm_adata_sg),
1482			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1483		set_flow_mode(&desc[idx], DIN_HASH);
1484		idx++;
1485	}
1486
1487	/* process the cipher */
1488	if (req_ctx->cryptlen)
1489		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1490
 1491	/* Read the intermediate MAC */
1492	hw_desc_init(&desc[idx]);
1493	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1494	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1495		      NS_BIT, 0);
1496	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1497	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1498	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1499	set_aes_not_hash_mode(&desc[idx]);
1500	idx++;
1501
1502	/* load AES-CTR state (for last MAC calculation)*/
1503	hw_desc_init(&desc[idx]);
1504	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1505	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1506	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1507		     AES_BLOCK_SIZE, NS_BIT);
1508	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1509	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1510	set_flow_mode(&desc[idx], S_DIN_to_AES);
1511	idx++;
1512
1513	hw_desc_init(&desc[idx]);
1514	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1515	set_dout_no_dma(&desc[idx], 0, 0, 1);
1516	idx++;
1517
1518	/* encrypt the "T" value and store MAC in mac_state */
1519	hw_desc_init(&desc[idx]);
1520	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1521		     ctx->authsize, NS_BIT);
1522	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1523	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1524	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1525	idx++;
1526
1527	*seq_size = idx;
1528	return 0;
1529}
1530
1531static int config_ccm_adata(struct aead_request *req)
1532{
1533	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1534	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1535	struct device *dev = drvdata_to_dev(ctx->drvdata);
1536	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1537	//unsigned int size_of_a = 0, rem_a_size = 0;
1538	unsigned int lp = req->iv[0];
 1539	/* Note: The code assumes that req->iv[0] already contains the value
 1540	 * of L' of RFC 3610
 1541	 */
 1542	unsigned int l = lp + 1;  /* This is L of RFC 3610. */
 1543	unsigned int m = ctx->authsize;  /* This is M of RFC 3610. */
1544	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1545	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1546	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1547	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1548				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1549				req->cryptlen :
1550				(req->cryptlen - ctx->authsize);
1551	int rc;
1552
1553	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1554	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1555
1556	/* taken from crypto/ccm.c */
1557	/* 2 <= L <= 8, so 1 <= L' <= 7. */
1558	if (l < 2 || l > 8) {
1559		dev_dbg(dev, "illegal iv value %X\n", req->iv[0]);
1560		return -EINVAL;
1561	}
1562	memcpy(b0, req->iv, AES_BLOCK_SIZE);
1563
1564	/* format control info per RFC 3610 and
1565	 * NIST Special Publication 800-38C
1566	 */
1567	*b0 |= (8 * ((m - 2) / 2));
1568	if (req_ctx->assoclen > 0)
1569		*b0 |= 64;  /* Enable bit 6 if Adata exists. */
1570
 1571	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write l(m), the message length. */
1572	if (rc) {
1573		dev_err(dev, "message len overflow detected");
1574		return rc;
1575	}
1576	 /* END of "taken from crypto/ccm.c" */
1577
1578	/* l(a) - size of associated data. */
1579	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
1580
1581	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1582	req->iv[15] = 1;
1583
1584	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1585	ctr_count_0[15] = 0;
1586
1587	return 0;
1588}
1589
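/*
 * cc_proc_rfc4309_ccm() - build the CCM IV for RFC 4309 (IPsec ESP):
 * flag byte 3 (i.e. a 4-byte message-length field), the 3-byte salt
 * taken at setkey time, then the 8-byte per-request IV.
 */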
1590static void cc_proc_rfc4309_ccm(struct aead_request *req)
1591{
1592	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1593	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1594	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
1595
1596	/* L' */
1597	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1598	/* For RFC 4309, always use 4 bytes for message length
1599	 * (at most 2^32-1 bytes).
1600	 */
1601	areq_ctx->ctr_iv[0] = 3;
1602
 1603	/* In RFC 4309 there is an 11-byte nonce+IV part
 1604	 * that we build here.
 1605	 */
1606	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1607	       CCM_BLOCK_NONCE_SIZE);
1608	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1609	       CCM_BLOCK_IV_SIZE);
1610	req->iv = areq_ctx->ctr_iv;
1611}
1612
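/*
 * cc_set_ghash_desc() - prepare the GHASH engine for GCM: derive the
 * hash subkey H by encrypting an all-zero block with the AES key, load H
 * as the GHASH key and clear the GHASH state before the associated data
 * and ciphertext are hashed.
 */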
1613static void cc_set_ghash_desc(struct aead_request *req,
1614			      struct cc_hw_desc desc[], unsigned int *seq_size)
1615{
1616	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1617	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1618	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1619	unsigned int idx = *seq_size;
1620
1621	/* load key to AES*/
1622	hw_desc_init(&desc[idx]);
1623	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1624	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1625	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1626		     ctx->enc_keylen, NS_BIT);
1627	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1628	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1629	set_flow_mode(&desc[idx], S_DIN_to_AES);
1630	idx++;
1631
1632	/* process one zero block to generate hkey */
1633	hw_desc_init(&desc[idx]);
1634	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1635	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1636		      NS_BIT, 0);
1637	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1638	idx++;
1639
1640	/* Memory Barrier */
1641	hw_desc_init(&desc[idx]);
1642	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1643	set_dout_no_dma(&desc[idx], 0, 0, 1);
1644	idx++;
1645
1646	/* Load GHASH subkey */
1647	hw_desc_init(&desc[idx]);
1648	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1649		     AES_BLOCK_SIZE, NS_BIT);
1650	set_dout_no_dma(&desc[idx], 0, 0, 1);
1651	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1652	set_aes_not_hash_mode(&desc[idx]);
1653	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1654	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1655	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1656	idx++;
1657
1658	/* Configure Hash Engine to work with GHASH.
1659	 * Since it was not possible to extend HASH submodes to add GHASH,
 1660	 * the following command is necessary in order to
1661	 * select GHASH (according to HW designers)
1662	 */
1663	hw_desc_init(&desc[idx]);
1664	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1665	set_dout_no_dma(&desc[idx], 0, 0, 1);
1666	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1667	set_aes_not_hash_mode(&desc[idx]);
1668	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1669	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1670	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1671	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1672	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1673	idx++;
1674
1675	/* Load GHASH initial STATE (which is 0). (for any hash there is an
1676	 * initial state)
1677	 */
1678	hw_desc_init(&desc[idx]);
1679	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1680	set_dout_no_dma(&desc[idx], 0, 0, 1);
1681	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1682	set_aes_not_hash_mode(&desc[idx]);
1683	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1684	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1685	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1686	idx++;
1687
1688	*seq_size = idx;
1689}
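
/*
 * The first two descriptors in this sequence derive the GHASH subkey exactly
 * as software GCM does: H = AES-ENC_K(0^128), produced by pushing a zero
 * block through the AES engine and stored in req_ctx->hkey. A rough,
 * hypothetical software equivalent (requires <crypto/aes.h>, shown here only
 * for illustration) would be:
 */
#if 0
#include <crypto/aes.h>

static int ghash_subkey_sketch(const u8 *key, unsigned int keylen,
			       u8 hkey[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx aes;
	static const u8 zeroes[AES_BLOCK_SIZE];
	int rc;

	rc = aes_expandkey(&aes, key, keylen);
	if (rc)
		return rc;

	aes_encrypt(&aes, hkey, zeroes);	/* H = AES-ENC_K(0^128) */
	memzero_explicit(&aes, sizeof(aes));
	return 0;
}
#endif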
1690
1691static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1692			     unsigned int *seq_size)
1693{
1694	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1695	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1696	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1697	unsigned int idx = *seq_size;
1698
1699	/* load key to AES*/
1700	hw_desc_init(&desc[idx]);
1701	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1702	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1703	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1704		     ctx->enc_keylen, NS_BIT);
1705	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1706	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1707	set_flow_mode(&desc[idx], S_DIN_to_AES);
1708	idx++;
1709
1710	if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1711		/* load AES/CTR initial CTR value inc by 2*/
1712		hw_desc_init(&desc[idx]);
1713		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1714		set_key_size_aes(&desc[idx], ctx->enc_keylen);
1715		set_din_type(&desc[idx], DMA_DLLI,
1716			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1717			     NS_BIT);
1718		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1719		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1720		set_flow_mode(&desc[idx], S_DIN_to_AES);
1721		idx++;
1722	}
1723
1724	*seq_size = idx;
1725}
1726
1727static void cc_proc_gcm_result(struct aead_request *req,
1728			       struct cc_hw_desc desc[],
1729			       unsigned int *seq_size)
1730{
1731	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1732	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1733	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1734	dma_addr_t mac_result;
1735	unsigned int idx = *seq_size;
1736
1737	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1738		mac_result = req_ctx->mac_buf_dma_addr;
1739	} else { /* Encrypt */
1740		mac_result = req_ctx->icv_dma_addr;
1741	}
1742
1743	/* process(ghash) gcm_block_len */
1744	hw_desc_init(&desc[idx]);
1745	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1746		     AES_BLOCK_SIZE, NS_BIT);
1747	set_flow_mode(&desc[idx], DIN_HASH);
1748	idx++;
1749
1750	/* Store GHASH state after GHASH(Associated Data + Ciphertext + LenBlock) */
1751	hw_desc_init(&desc[idx]);
1752	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1753	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1754	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1755		      NS_BIT, 0);
1756	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1757	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1758	set_aes_not_hash_mode(&desc[idx]);
1759
1760	idx++;
1761
1762	/* load AES/CTR initial CTR value inc by 1*/
1763	hw_desc_init(&desc[idx]);
1764	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1765	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1766	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1767		     AES_BLOCK_SIZE, NS_BIT);
1768	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1769	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1770	set_flow_mode(&desc[idx], S_DIN_to_AES);
1771	idx++;
1772
1773	/* Memory Barrier */
1774	hw_desc_init(&desc[idx]);
1775	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1776	set_dout_no_dma(&desc[idx], 0, 0, 1);
1777	idx++;
1778
1779	/* process GCTR on stored GHASH and store MAC in mac_state*/
1780	hw_desc_init(&desc[idx]);
1781	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1782	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1783		     AES_BLOCK_SIZE, NS_BIT);
1784	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1785	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1786	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1787	idx++;
1788
1789	*seq_size = idx;
1790}
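
/*
 * Hedged reference (not driver code): with the 96-bit IV used here, GCM's
 * pre-counter block is J0 = IV || 0^31 || 1, and the sequence above finishes
 * the SP 800-38D computation
 *
 *   S = GHASH_H(A || pad || C || pad || [len(A)]64 || [len(C)]64)
 *   T = MSB_authsize(GCTR_K(J0, S))
 *
 * which is why gcm_iv_inc1 (counter value 1, i.e. J0) is loaded for the tag,
 * while gcm_iv_inc2 (counter value 2) fed the payload in cc_set_gctr_desc().
 */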
1791
1792static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1793		  unsigned int *seq_size)
1794{
1795	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1796	unsigned int cipher_flow_mode;
1797	unsigned int cipher_flow_mode;
1798	// In RFC 4543 there is no data to encrypt; just copy data from src to dest.
1799	if (req_ctx->plaintext_authenticate_only) {
1800		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1801		cc_set_ghash_desc(req, desc, seq_size);
1802		/* process(ghash) assoc data */
1803		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1804		cc_set_gctr_desc(req, desc, seq_size);
1805		cc_proc_gcm_result(req, desc, seq_size);
1806		return 0;
1807	}
1808
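	/*
	 * GHASH is always computed over the ciphertext: on decrypt the input
	 * data feeds the AES and HASH engines in parallel, on encrypt the AES
	 * output is routed both to the HASH engine and to DOUT.
	 */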
1809	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1810		cipher_flow_mode = AES_and_HASH;
1811	} else { /* Encrypt */
1812		cipher_flow_mode = AES_to_HASH_and_DOUT;
1813	}
1814
1815	// for gcm and rfc4106.
1816	cc_set_ghash_desc(req, desc, seq_size);
1817	/* process(ghash) assoc data */
1818	if (req_ctx->assoclen > 0)
1819		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1820	cc_set_gctr_desc(req, desc, seq_size);
1821	/* process(gctr+ghash) */
1822	if (req_ctx->cryptlen)
1823		cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1824	cc_proc_gcm_result(req, desc, seq_size);
1825
1826	return 0;
1827}
1828
1829static int config_gcm_context(struct aead_request *req)
1830{
1831	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1832	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1833	struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req);
1834	struct device *dev = drvdata_to_dev(ctx->drvdata);
1835
1836	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1837				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1838				req->cryptlen :
1839				(req->cryptlen - ctx->authsize);
1840	__be32 counter = cpu_to_be32(2);
1841
1842	dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1843		__func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1844
1845	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1846
1847	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1848
1849	memcpy(req->iv + 12, &counter, 4);
1850	memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1851
1852	counter = cpu_to_be32(1);
1853	memcpy(req->iv + 12, &counter, 4);
1854	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1855
1856	if (!req_ctx->plaintext_authenticate_only) {
1857		__be64 temp64;
1858
1859		temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1860		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1861		temp64 = cpu_to_be64(cryptlen * 8);
1862		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1863	} else {
1864		/* rfc4543 => all data (AAD, IV, plaintext) is treated as additional
1865		 * data, i.e. nothing is encrypted.
1866		 */
1867		__be64 temp64;
1868
1869		temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8);
 
1870		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1871		temp64 = 0;
1872		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1873	}
1874
1875	return 0;
1876}
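
/*
 * Hedged reference sketch (not driver code): what config_gcm_context()
 * prepares, expressed in plain C. With a 96-bit IV, GCM's pre-counter block
 * is J0 = IV || 0^31 || 1; the payload keystream starts at counter 2 and the
 * tag is produced with counter 1. The trailing GHASH block carries the bit
 * lengths of A and C as two big-endian 64-bit values. The helper name and
 * signature are hypothetical.
 */
static inline void gcm_blocks_sketch(const u8 iv[12], u64 aadlen, u64 cryptlen,
				     u8 j0_inc1[16], u8 j0_inc2[16],
				     u8 len_block[16])
{
	__be32 ctr;
	__be64 bits;

	memcpy(j0_inc1, iv, 12);
	ctr = cpu_to_be32(1);			/* counter 1: used for the tag */
	memcpy(j0_inc1 + 12, &ctr, 4);

	memcpy(j0_inc2, iv, 12);
	ctr = cpu_to_be32(2);			/* counter 2: first payload block */
	memcpy(j0_inc2 + 12, &ctr, 4);

	bits = cpu_to_be64(aadlen * 8);		/* [len(A)]64 */
	memcpy(len_block, &bits, 8);
	bits = cpu_to_be64(cryptlen * 8);	/* [len(C)]64 (0 for rfc4543) */
	memcpy(len_block + 8, &bits, 8);
}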
1877
1878static void cc_proc_rfc4_gcm(struct aead_request *req)
1879{
1880	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1881	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1882	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
1883
1884	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1885	       ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1886	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1887	       GCM_BLOCK_RFC4_IV_SIZE);
1888	req->iv = areq_ctx->ctr_iv;
 
1889}
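
/*
 * Illustrative only (not driver code): the 12-byte GCM nonce assembled by
 * cc_proc_rfc4_gcm() above, per RFC 4106: a 4-byte salt taken from the end
 * of the key (ctx->ctr_nonce) followed by the 8-byte explicit IV. The helper
 * name is hypothetical.
 */
static inline void rfc4106_nonce_sketch(u8 nonce[12], const u8 salt[4],
					const u8 iv[8])
{
	memcpy(nonce, salt, 4);		/* salt from the tail of the key */
	memcpy(nonce + 4, iv, 8);	/* per-request explicit IV */
}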
1890
1891static int cc_proc_aead(struct aead_request *req,
1892			enum drv_crypto_direction direct)
1893{
1894	int rc = 0;
1895	int seq_len = 0;
1896	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1897	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1898	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1899	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
1900	struct device *dev = drvdata_to_dev(ctx->drvdata);
1901	struct cc_crypto_req cc_req = {};
1902
1903	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1904		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1905		ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1906		sg_virt(req->dst), req->dst->offset, req->cryptlen);
1907
1908	/* STAT_PHASE_0: Init and sanity checks */
1909
1910	/* Check data length according to mode */
1911	if (validate_data_size(ctx, direct, req)) {
1912		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1913			req->cryptlen, areq_ctx->assoclen);
 
1914		return -EINVAL;
1915	}
1916
1917	/* Setup request structure */
1918	cc_req.user_cb = cc_aead_complete;
1919	cc_req.user_arg = req;
1920
1921	/* Setup request context */
1922	areq_ctx->gen_ctx.op_type = direct;
1923	areq_ctx->req_authsize = ctx->authsize;
1924	areq_ctx->cipher_mode = ctx->cipher_mode;
1925
1926	/* STAT_PHASE_1: Map buffers */
1927
1928	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1929		/* Build CTR IV - Copy nonce from last 4 bytes in
1930		 * CTR key to first 4 bytes in CTR IV
1931		 */
1932		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1933		       CTR_RFC3686_NONCE_SIZE);
1934		memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1935		       CTR_RFC3686_IV_SIZE);
1936		/* Initialize counter portion of counter block */
1937		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1938			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1939
1940		/* Replace with counter iv */
1941		req->iv = areq_ctx->ctr_iv;
1942		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1943	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1944		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1945		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1946		if (areq_ctx->ctr_iv != req->iv) {
1947			memcpy(areq_ctx->ctr_iv, req->iv,
1948			       crypto_aead_ivsize(tfm));
1949			req->iv = areq_ctx->ctr_iv;
1950		}
1951	} else {
1952		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1953	}
1954
1955	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1956		rc = config_ccm_adata(req);
1957		if (rc) {
1958			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!\n",
1959				rc);
1960			goto exit;
1961		}
1962	} else {
1963		areq_ctx->ccm_hdr_size = ccm_header_size_null;
1964	}
1965
1966	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1967		rc = config_gcm_context(req);
1968		if (rc) {
1969			dev_dbg(dev, "config_gcm_context() returned with a failure %d!\n",
1970				rc);
1971			goto exit;
1972		}
1973	}
1974
1975	rc = cc_map_aead_request(ctx->drvdata, req);
1976	if (rc) {
1977		dev_err(dev, "map_request() failed\n");
1978		goto exit;
1979	}
1980
1981	/* STAT_PHASE_2: Create sequence */
1982
1983	/* Load MLLI tables to SRAM if necessary */
1984	cc_mlli_to_sram(req, desc, &seq_len);
1985
 
1986	switch (ctx->auth_mode) {
1987	case DRV_HASH_SHA1:
1988	case DRV_HASH_SHA256:
1989		cc_hmac_authenc(req, desc, &seq_len);
1990		break;
1991	case DRV_HASH_XCBC_MAC:
1992		cc_xcbc_authenc(req, desc, &seq_len);
1993		break;
1994	case DRV_HASH_NULL:
1995		if (ctx->cipher_mode == DRV_CIPHER_CCM)
1996			cc_ccm(req, desc, &seq_len);
1997		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
1998			cc_gcm(req, desc, &seq_len);
1999		break;
2000	default:
2001		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2002		cc_unmap_aead_request(dev, req);
2003		rc = -ENOTSUPP;
2004		goto exit;
2005	}
2006
2007	/* STAT_PHASE_3: Lock HW and push sequence */
2008
2009	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2010
2011	if (rc != -EINPROGRESS && rc != -EBUSY) {
2012		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2013		cc_unmap_aead_request(dev, req);
2014	}
2015
2016exit:
2017	return rc;
2018}
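
/*
 * Illustrative only (not driver code): the RFC 3686 counter block assembled
 * in cc_proc_aead() for the rfc3686(ctr(aes))-based authenc modes: 4-byte
 * nonce from the tail of the CTR key, 8-byte per-request IV, then a
 * big-endian block counter initialized to 1. The helper name is hypothetical.
 */
static inline void rfc3686_iv_sketch(u8 ctr_iv[16], const u8 nonce[4],
				     const u8 iv[8])
{
	__be32 one = cpu_to_be32(1);

	memcpy(ctr_iv, nonce, 4);
	memcpy(ctr_iv + 4, iv, 8);
	memcpy(ctr_iv + 12, &one, 4);
}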
2019
2020static int cc_aead_encrypt(struct aead_request *req)
2021{
2022	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2023	int rc;
2024
2025	memset(areq_ctx, 0, sizeof(*areq_ctx));
2026
2027	/* No generated IV required */
2028	areq_ctx->backup_iv = req->iv;
2029	areq_ctx->assoclen = req->assoclen;
2030
2031	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2032	if (rc != -EINPROGRESS && rc != -EBUSY)
2033		req->iv = areq_ctx->backup_iv;
2034
2035	return rc;
2036}
2037
2038static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2039{
2040	/* Very similar to cc_aead_encrypt() above. */
2041
2042	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2043	int rc;
2044
2045	rc = crypto_ipsec_check_assoclen(req->assoclen);
2046	if (rc)
2047		goto out;
 
2048
2049	memset(areq_ctx, 0, sizeof(*areq_ctx));
2050
2051	/* No generated IV required */
2052	areq_ctx->backup_iv = req->iv;
2053	areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
 
2054
2055	cc_proc_rfc4309_ccm(req);
2056
2057	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2058	if (rc != -EINPROGRESS && rc != -EBUSY)
2059		req->iv = areq_ctx->backup_iv;
2060out:
2061	return rc;
2062}
2063
2064static int cc_aead_decrypt(struct aead_request *req)
2065{
2066	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2067	int rc;
2068
2069	memset(areq_ctx, 0, sizeof(*areq_ctx));
2070
2071	/* No generated IV required */
2072	areq_ctx->backup_iv = req->iv;
2073	areq_ctx->assoclen = req->assoclen;
2074
2075	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2076	if (rc != -EINPROGRESS && rc != -EBUSY)
2077		req->iv = areq_ctx->backup_iv;
2078
2079	return rc;
2080}
2081
2082static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2083{
2084	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2085	int rc;
2086
2087	rc = crypto_ipsec_check_assoclen(req->assoclen);
2088	if (rc)
2089		goto out;
 
2090
2091	memset(areq_ctx, 0, sizeof(*areq_ctx));
2092
2093	/* No generated IV required */
2094	areq_ctx->backup_iv = req->iv;
2095	areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE;
2096
 
2097	cc_proc_rfc4309_ccm(req);
2098
2099	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2100	if (rc != -EINPROGRESS && rc != -EBUSY)
2101		req->iv = areq_ctx->backup_iv;
2102
2103out:
2104	return rc;
2105}
2106
2107static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2108				 unsigned int keylen)
2109{
2110	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2111	struct device *dev = drvdata_to_dev(ctx->drvdata);
2112
2113	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2114
2115	if (keylen < 4)
2116		return -EINVAL;
2117
2118	keylen -= 4;
2119	memcpy(ctx->ctr_nonce, key + keylen, 4);
2120
2121	return cc_aead_setkey(tfm, key, keylen);
2122}
2123
2124static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2125				 unsigned int keylen)
2126{
2127	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2128	struct device *dev = drvdata_to_dev(ctx->drvdata);
2129
2130	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2131
2132	if (keylen < 4)
2133		return -EINVAL;
2134
2135	keylen -= 4;
2136	memcpy(ctx->ctr_nonce, key + keylen, 4);
2137
2138	return cc_aead_setkey(tfm, key, keylen);
2139}
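
/*
 * Illustrative note: per RFC 4106 (and likewise RFC 4543), the keying
 * material carries a 4-byte salt after the AES key,
 *
 *   key = AES key (16/24/32 bytes) || salt (4 bytes)
 *
 * which the two setkey helpers above peel off into ctx->ctr_nonce before
 * handing the AES portion to cc_aead_setkey().
 */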
2140
2141static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2142			      unsigned int authsize)
2143{
2144	switch (authsize) {
2145	case 4:
2146	case 8:
2147	case 12:
2148	case 13:
2149	case 14:
2150	case 15:
2151	case 16:
2152		break;
2153	default:
2154		return -EINVAL;
2155	}
2156
2157	return cc_aead_setauthsize(authenc, authsize);
2158}
2159
2160static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2161				      unsigned int authsize)
2162{
2163	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2164	struct device *dev = drvdata_to_dev(ctx->drvdata);
2165
2166	dev_dbg(dev, "authsize %d\n", authsize);
2167
2168	switch (authsize) {
2169	case 8:
2170	case 12:
2171	case 16:
2172		break;
2173	default:
2174		return -EINVAL;
2175	}
2176
2177	return cc_aead_setauthsize(authenc, authsize);
2178}
2179
2180static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2181				      unsigned int authsize)
2182{
2183	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2184	struct device *dev = drvdata_to_dev(ctx->drvdata);
2185
2186	dev_dbg(dev, "authsize %d\n", authsize);
2187
2188	if (authsize != 16)
2189		return -EINVAL;
2190
2191	return cc_aead_setauthsize(authenc, authsize);
2192}
2193
2194static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2195{
2196	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2197	int rc;
2198
2199	rc = crypto_ipsec_check_assoclen(req->assoclen);
2200	if (rc)
2201		goto out;
 
2202
2203	memset(areq_ctx, 0, sizeof(*areq_ctx));
2204
2205	/* No generated IV required */
2206	areq_ctx->backup_iv = req->iv;
2207	areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
 
2208
2209	cc_proc_rfc4_gcm(req);
 
2210
2211	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2212	if (rc != -EINPROGRESS && rc != -EBUSY)
2213		req->iv = areq_ctx->backup_iv;
2214out:
2215	return rc;
2216}
2217
2218static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2219{
2220	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2221	int rc;
2222
2223	rc = crypto_ipsec_check_assoclen(req->assoclen);
2224	if (rc)
2225		goto out;
 
2226
2227	memset(areq_ctx, 0, sizeof(*areq_ctx));
2228
2229	// plaintext is not encrypted with rfc4543
2230	areq_ctx->plaintext_authenticate_only = true;
2231
2232	/* No generated IV required */
2233	areq_ctx->backup_iv = req->iv;
2234	areq_ctx->assoclen = req->assoclen;
2235
2236	cc_proc_rfc4_gcm(req);
 
2237
2238	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2239	if (rc != -EINPROGRESS && rc != -EBUSY)
2240		req->iv = areq_ctx->backup_iv;
2241out:
2242	return rc;
2243}
2244
2245static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2246{
2247	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2248	int rc;
2249
2250	rc = crypto_ipsec_check_assoclen(req->assoclen);
2251	if (rc)
2252		goto out;
 
2253
2254	memset(areq_ctx, 0, sizeof(*areq_ctx));
2255
2256	/* No generated IV required */
2257	areq_ctx->backup_iv = req->iv;
2258	areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE;
 
2259
2260	cc_proc_rfc4_gcm(req);
 
2261
2262	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2263	if (rc != -EINPROGRESS && rc != -EBUSY)
2264		req->iv = areq_ctx->backup_iv;
2265out:
2266	return rc;
2267}
2268
2269static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2270{
2271	struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req);
2272	int rc;
2273
2274	rc = crypto_ipsec_check_assoclen(req->assoclen);
2275	if (rc)
2276		goto out;
 
2277
2278	memset(areq_ctx, 0, sizeof(*areq_ctx));
2279
2280	// plaintext is not decrypted with rfc4543
2281	areq_ctx->plaintext_authenticate_only = true;
2282
2283	/* No generated IV required */
2284	areq_ctx->backup_iv = req->iv;
2285	areq_ctx->assoclen = req->assoclen;
2286
2287	cc_proc_rfc4_gcm(req);
 
2288
2289	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2290	if (rc != -EINPROGRESS && rc != -EBUSY)
2291		req->iv = areq_ctx->backup_iv;
2292out:
2293	return rc;
2294}
2295
2296/* aead alg */
2297static struct cc_alg_template aead_algs[] = {
2298	{
2299		.name = "authenc(hmac(sha1),cbc(aes))",
2300		.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2301		.blocksize = AES_BLOCK_SIZE,
2302		.template_aead = {
2303			.setkey = cc_aead_setkey,
2304			.setauthsize = cc_aead_setauthsize,
2305			.encrypt = cc_aead_encrypt,
2306			.decrypt = cc_aead_decrypt,
2307			.init = cc_aead_init,
2308			.exit = cc_aead_exit,
2309			.ivsize = AES_BLOCK_SIZE,
2310			.maxauthsize = SHA1_DIGEST_SIZE,
2311		},
2312		.cipher_mode = DRV_CIPHER_CBC,
2313		.flow_mode = S_DIN_to_AES,
2314		.auth_mode = DRV_HASH_SHA1,
2315		.min_hw_rev = CC_HW_REV_630,
2316		.std_body = CC_STD_NIST,
2317	},
2318	{
2319		.name = "authenc(hmac(sha1),cbc(des3_ede))",
2320		.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2321		.blocksize = DES3_EDE_BLOCK_SIZE,
2322		.template_aead = {
2323			.setkey = cc_des3_aead_setkey,
2324			.setauthsize = cc_aead_setauthsize,
2325			.encrypt = cc_aead_encrypt,
2326			.decrypt = cc_aead_decrypt,
2327			.init = cc_aead_init,
2328			.exit = cc_aead_exit,
2329			.ivsize = DES3_EDE_BLOCK_SIZE,
2330			.maxauthsize = SHA1_DIGEST_SIZE,
2331		},
2332		.cipher_mode = DRV_CIPHER_CBC,
2333		.flow_mode = S_DIN_to_DES,
2334		.auth_mode = DRV_HASH_SHA1,
2335		.min_hw_rev = CC_HW_REV_630,
2336		.std_body = CC_STD_NIST,
2337	},
2338	{
2339		.name = "authenc(hmac(sha256),cbc(aes))",
2340		.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2341		.blocksize = AES_BLOCK_SIZE,
2342		.template_aead = {
2343			.setkey = cc_aead_setkey,
2344			.setauthsize = cc_aead_setauthsize,
2345			.encrypt = cc_aead_encrypt,
2346			.decrypt = cc_aead_decrypt,
2347			.init = cc_aead_init,
2348			.exit = cc_aead_exit,
2349			.ivsize = AES_BLOCK_SIZE,
2350			.maxauthsize = SHA256_DIGEST_SIZE,
2351		},
2352		.cipher_mode = DRV_CIPHER_CBC,
2353		.flow_mode = S_DIN_to_AES,
2354		.auth_mode = DRV_HASH_SHA256,
2355		.min_hw_rev = CC_HW_REV_630,
2356		.std_body = CC_STD_NIST,
2357	},
2358	{
2359		.name = "authenc(hmac(sha256),cbc(des3_ede))",
2360		.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2361		.blocksize = DES3_EDE_BLOCK_SIZE,
2362		.template_aead = {
2363			.setkey = cc_des3_aead_setkey,
2364			.setauthsize = cc_aead_setauthsize,
2365			.encrypt = cc_aead_encrypt,
2366			.decrypt = cc_aead_decrypt,
2367			.init = cc_aead_init,
2368			.exit = cc_aead_exit,
2369			.ivsize = DES3_EDE_BLOCK_SIZE,
2370			.maxauthsize = SHA256_DIGEST_SIZE,
2371		},
2372		.cipher_mode = DRV_CIPHER_CBC,
2373		.flow_mode = S_DIN_to_DES,
2374		.auth_mode = DRV_HASH_SHA256,
2375		.min_hw_rev = CC_HW_REV_630,
2376		.std_body = CC_STD_NIST,
2377	},
2378	{
2379		.name = "authenc(xcbc(aes),cbc(aes))",
2380		.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2381		.blocksize = AES_BLOCK_SIZE,
2382		.template_aead = {
2383			.setkey = cc_aead_setkey,
2384			.setauthsize = cc_aead_setauthsize,
2385			.encrypt = cc_aead_encrypt,
2386			.decrypt = cc_aead_decrypt,
2387			.init = cc_aead_init,
2388			.exit = cc_aead_exit,
2389			.ivsize = AES_BLOCK_SIZE,
2390			.maxauthsize = AES_BLOCK_SIZE,
2391		},
2392		.cipher_mode = DRV_CIPHER_CBC,
2393		.flow_mode = S_DIN_to_AES,
2394		.auth_mode = DRV_HASH_XCBC_MAC,
2395		.min_hw_rev = CC_HW_REV_630,
2396		.std_body = CC_STD_NIST,
2397	},
2398	{
2399		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2400		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2401		.blocksize = 1,
2402		.template_aead = {
2403			.setkey = cc_aead_setkey,
2404			.setauthsize = cc_aead_setauthsize,
2405			.encrypt = cc_aead_encrypt,
2406			.decrypt = cc_aead_decrypt,
2407			.init = cc_aead_init,
2408			.exit = cc_aead_exit,
2409			.ivsize = CTR_RFC3686_IV_SIZE,
2410			.maxauthsize = SHA1_DIGEST_SIZE,
2411		},
2412		.cipher_mode = DRV_CIPHER_CTR,
2413		.flow_mode = S_DIN_to_AES,
2414		.auth_mode = DRV_HASH_SHA1,
2415		.min_hw_rev = CC_HW_REV_630,
2416		.std_body = CC_STD_NIST,
2417	},
2418	{
2419		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2420		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2421		.blocksize = 1,
2422		.template_aead = {
2423			.setkey = cc_aead_setkey,
2424			.setauthsize = cc_aead_setauthsize,
2425			.encrypt = cc_aead_encrypt,
2426			.decrypt = cc_aead_decrypt,
2427			.init = cc_aead_init,
2428			.exit = cc_aead_exit,
2429			.ivsize = CTR_RFC3686_IV_SIZE,
2430			.maxauthsize = SHA256_DIGEST_SIZE,
2431		},
2432		.cipher_mode = DRV_CIPHER_CTR,
2433		.flow_mode = S_DIN_to_AES,
2434		.auth_mode = DRV_HASH_SHA256,
2435		.min_hw_rev = CC_HW_REV_630,
2436		.std_body = CC_STD_NIST,
2437	},
2438	{
2439		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2440		.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2441		.blocksize = 1,
2442		.template_aead = {
2443			.setkey = cc_aead_setkey,
2444			.setauthsize = cc_aead_setauthsize,
2445			.encrypt = cc_aead_encrypt,
2446			.decrypt = cc_aead_decrypt,
2447			.init = cc_aead_init,
2448			.exit = cc_aead_exit,
2449			.ivsize = CTR_RFC3686_IV_SIZE,
2450			.maxauthsize = AES_BLOCK_SIZE,
2451		},
2452		.cipher_mode = DRV_CIPHER_CTR,
2453		.flow_mode = S_DIN_to_AES,
2454		.auth_mode = DRV_HASH_XCBC_MAC,
2455		.min_hw_rev = CC_HW_REV_630,
2456		.std_body = CC_STD_NIST,
2457	},
2458	{
2459		.name = "ccm(aes)",
2460		.driver_name = "ccm-aes-ccree",
2461		.blocksize = 1,
2462		.template_aead = {
2463			.setkey = cc_aead_setkey,
2464			.setauthsize = cc_ccm_setauthsize,
2465			.encrypt = cc_aead_encrypt,
2466			.decrypt = cc_aead_decrypt,
2467			.init = cc_aead_init,
2468			.exit = cc_aead_exit,
2469			.ivsize = AES_BLOCK_SIZE,
2470			.maxauthsize = AES_BLOCK_SIZE,
2471		},
2472		.cipher_mode = DRV_CIPHER_CCM,
2473		.flow_mode = S_DIN_to_AES,
2474		.auth_mode = DRV_HASH_NULL,
2475		.min_hw_rev = CC_HW_REV_630,
2476		.std_body = CC_STD_NIST,
2477	},
2478	{
2479		.name = "rfc4309(ccm(aes))",
2480		.driver_name = "rfc4309-ccm-aes-ccree",
2481		.blocksize = 1,
2482		.template_aead = {
2483			.setkey = cc_rfc4309_ccm_setkey,
2484			.setauthsize = cc_rfc4309_ccm_setauthsize,
2485			.encrypt = cc_rfc4309_ccm_encrypt,
2486			.decrypt = cc_rfc4309_ccm_decrypt,
2487			.init = cc_aead_init,
2488			.exit = cc_aead_exit,
2489			.ivsize = CCM_BLOCK_IV_SIZE,
2490			.maxauthsize = AES_BLOCK_SIZE,
2491		},
2492		.cipher_mode = DRV_CIPHER_CCM,
2493		.flow_mode = S_DIN_to_AES,
2494		.auth_mode = DRV_HASH_NULL,
2495		.min_hw_rev = CC_HW_REV_630,
2496		.std_body = CC_STD_NIST,
2497	},
2498	{
2499		.name = "gcm(aes)",
2500		.driver_name = "gcm-aes-ccree",
2501		.blocksize = 1,
2502		.template_aead = {
2503			.setkey = cc_aead_setkey,
2504			.setauthsize = cc_gcm_setauthsize,
2505			.encrypt = cc_aead_encrypt,
2506			.decrypt = cc_aead_decrypt,
2507			.init = cc_aead_init,
2508			.exit = cc_aead_exit,
2509			.ivsize = 12,
2510			.maxauthsize = AES_BLOCK_SIZE,
2511		},
2512		.cipher_mode = DRV_CIPHER_GCTR,
2513		.flow_mode = S_DIN_to_AES,
2514		.auth_mode = DRV_HASH_NULL,
2515		.min_hw_rev = CC_HW_REV_630,
2516		.std_body = CC_STD_NIST,
2517	},
2518	{
2519		.name = "rfc4106(gcm(aes))",
2520		.driver_name = "rfc4106-gcm-aes-ccree",
2521		.blocksize = 1,
2522		.template_aead = {
2523			.setkey = cc_rfc4106_gcm_setkey,
2524			.setauthsize = cc_rfc4106_gcm_setauthsize,
2525			.encrypt = cc_rfc4106_gcm_encrypt,
2526			.decrypt = cc_rfc4106_gcm_decrypt,
2527			.init = cc_aead_init,
2528			.exit = cc_aead_exit,
2529			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2530			.maxauthsize = AES_BLOCK_SIZE,
2531		},
2532		.cipher_mode = DRV_CIPHER_GCTR,
2533		.flow_mode = S_DIN_to_AES,
2534		.auth_mode = DRV_HASH_NULL,
2535		.min_hw_rev = CC_HW_REV_630,
2536		.std_body = CC_STD_NIST,
2537	},
2538	{
2539		.name = "rfc4543(gcm(aes))",
2540		.driver_name = "rfc4543-gcm-aes-ccree",
2541		.blocksize = 1,
2542		.template_aead = {
2543			.setkey = cc_rfc4543_gcm_setkey,
2544			.setauthsize = cc_rfc4543_gcm_setauthsize,
2545			.encrypt = cc_rfc4543_gcm_encrypt,
2546			.decrypt = cc_rfc4543_gcm_decrypt,
2547			.init = cc_aead_init,
2548			.exit = cc_aead_exit,
2549			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2550			.maxauthsize = AES_BLOCK_SIZE,
2551		},
2552		.cipher_mode = DRV_CIPHER_GCTR,
2553		.flow_mode = S_DIN_to_AES,
2554		.auth_mode = DRV_HASH_NULL,
2555		.min_hw_rev = CC_HW_REV_630,
2556		.std_body = CC_STD_NIST,
2557	},
2558};
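
/*
 * Hedged usage sketch (not part of this file): how a kernel caller might
 * exercise one of the AEADs registered above through the generic crypto API.
 * A "gcm(aes)" request may be served by "gcm-aes-ccree" when this driver has
 * the highest priority. The helper name, buffer layout and error handling are
 * illustrative; buffers should be kmalloc()ed (DMA-able), not on the stack,
 * and iv points at a 12-byte GCM nonce. Requires <crypto/aead.h> and
 * <linux/scatterlist.h>.
 */
#if 0
#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int ccree_gcm_usage_sketch(const u8 *key, unsigned int keylen,
				  u8 *buf, unsigned int assoclen,
				  unsigned int ptlen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int rc;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_aead_setkey(tfm, key, keylen);
	if (!rc)
		rc = crypto_aead_setauthsize(tfm, 16);
	if (rc)
		goto free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto free_tfm;
	}

	/* buf holds AAD || plaintext and has room for the 16-byte tag */
	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
free_tfm:
	crypto_free_aead(tfm);
	return rc;
}
#endif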
2559
2560static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2561						struct device *dev)
2562{
2563	struct cc_crypto_alg *t_alg;
2564	struct aead_alg *alg;
2565
2566	t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL);
2567	if (!t_alg)
2568		return ERR_PTR(-ENOMEM);
2569
2570	alg = &tmpl->template_aead;
2571
2572	if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s",
2573		     tmpl->name) >= CRYPTO_MAX_ALG_NAME)
2574		return ERR_PTR(-EINVAL);
2575	if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2576		     tmpl->driver_name) >= CRYPTO_MAX_ALG_NAME)
2577		return ERR_PTR(-EINVAL);
2578
2579	alg->base.cra_module = THIS_MODULE;
2580	alg->base.cra_priority = CC_CRA_PRIO;
2581
2582	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
2583	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2584	alg->base.cra_blocksize = tmpl->blocksize;
2585	alg->init = cc_aead_init;
2586	alg->exit = cc_aead_exit;
2587
2588	t_alg->aead_alg = *alg;
2589
2590	t_alg->cipher_mode = tmpl->cipher_mode;
2591	t_alg->flow_mode = tmpl->flow_mode;
2592	t_alg->auth_mode = tmpl->auth_mode;
2593
2594	return t_alg;
2595}
2596
2597int cc_aead_free(struct cc_drvdata *drvdata)
2598{
2599	struct cc_crypto_alg *t_alg, *n;
2600	struct cc_aead_handle *aead_handle = drvdata->aead_handle;
 
2601
2602	/* Remove registered algs */
2603	list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) {
2604		crypto_unregister_aead(&t_alg->aead_alg);
2605		list_del(&t_alg->entry);
2606	}
2607
2608	return 0;
2609}
2610
2611int cc_aead_alloc(struct cc_drvdata *drvdata)
2612{
2613	struct cc_aead_handle *aead_handle;
2614	struct cc_crypto_alg *t_alg;
2615	int rc = -ENOMEM;
2616	int alg;
2617	struct device *dev = drvdata_to_dev(drvdata);
2618
2619	aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL);
2620	if (!aead_handle) {
2621		rc = -ENOMEM;
2622		goto fail0;
2623	}
2624
2625	INIT_LIST_HEAD(&aead_handle->aead_list);
2626	drvdata->aead_handle = aead_handle;
2627
2628	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2629							 MAX_HMAC_DIGEST_SIZE);
2630
2631	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
 
2632		rc = -ENOMEM;
2633		goto fail1;
2634	}
2635
2636	/* Linux crypto */
2637	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2638		if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
2639		    !(drvdata->std_bodies & aead_algs[alg].std_body))
2640			continue;
2641
2642		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2643		if (IS_ERR(t_alg)) {
2644			rc = PTR_ERR(t_alg);
2645			dev_err(dev, "%s alg allocation failed\n",
2646				aead_algs[alg].driver_name);
2647			goto fail1;
2648		}
2649		t_alg->drvdata = drvdata;
2650		rc = crypto_register_aead(&t_alg->aead_alg);
2651		if (rc) {
2652			dev_err(dev, "%s alg registration failed\n",
2653				t_alg->aead_alg.base.cra_driver_name);
2654				goto fail1;
2655		}
2656
2657		list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2658		dev_dbg(dev, "Registered %s\n",
2659			t_alg->aead_alg.base.cra_driver_name);
2660	}
2661
2662	return 0;
2663
2664fail1:
2665	cc_aead_free(drvdata);
2666fail0:
2667	return rc;
2668}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <crypto/algapi.h>
   7#include <crypto/internal/aead.h>
   8#include <crypto/authenc.h>
 
 
   9#include <crypto/internal/des.h>
  10#include <linux/rtnetlink.h>
  11#include "cc_driver.h"
  12#include "cc_buffer_mgr.h"
  13#include "cc_aead.h"
  14#include "cc_request_mgr.h"
  15#include "cc_hash.h"
  16#include "cc_sram_mgr.h"
  17
  18#define template_aead	template_u.aead
  19
  20#define MAX_AEAD_SETKEY_SEQ 12
  21#define MAX_AEAD_PROCESS_SEQ 23
  22
  23#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
  24#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)
  25
  26#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
  27
  28struct cc_aead_handle {
  29	cc_sram_addr_t sram_workspace_addr;
  30	struct list_head aead_list;
  31};
  32
  33struct cc_hmac_s {
  34	u8 *padded_authkey;
  35	u8 *ipad_opad; /* IPAD, OPAD*/
  36	dma_addr_t padded_authkey_dma_addr;
  37	dma_addr_t ipad_opad_dma_addr;
  38};
  39
  40struct cc_xcbc_s {
  41	u8 *xcbc_keys; /* K1,K2,K3 */
  42	dma_addr_t xcbc_keys_dma_addr;
  43};
  44
  45struct cc_aead_ctx {
  46	struct cc_drvdata *drvdata;
  47	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
  48	u8 *enckey;
  49	dma_addr_t enckey_dma_addr;
  50	union {
  51		struct cc_hmac_s hmac;
  52		struct cc_xcbc_s xcbc;
  53	} auth_state;
  54	unsigned int enc_keylen;
  55	unsigned int auth_keylen;
  56	unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
  57	unsigned int hash_len;
  58	enum drv_cipher_mode cipher_mode;
  59	enum cc_flow_mode flow_mode;
  60	enum drv_hash_mode auth_mode;
  61};
  62
  63static inline bool valid_assoclen(struct aead_request *req)
  64{
  65	return ((req->assoclen == 16) || (req->assoclen == 20));
  66}
  67
  68static void cc_aead_exit(struct crypto_aead *tfm)
  69{
  70	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
  71	struct device *dev = drvdata_to_dev(ctx->drvdata);
  72
  73	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
  74		crypto_tfm_alg_name(&tfm->base));
  75
  76	/* Unmap enckey buffer */
  77	if (ctx->enckey) {
  78		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
  79				  ctx->enckey_dma_addr);
  80		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
  81			&ctx->enckey_dma_addr);
  82		ctx->enckey_dma_addr = 0;
  83		ctx->enckey = NULL;
  84	}
  85
  86	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
  87		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
  88
  89		if (xcbc->xcbc_keys) {
  90			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
  91					  xcbc->xcbc_keys,
  92					  xcbc->xcbc_keys_dma_addr);
  93		}
  94		dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
  95			&xcbc->xcbc_keys_dma_addr);
  96		xcbc->xcbc_keys_dma_addr = 0;
  97		xcbc->xcbc_keys = NULL;
  98	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
  99		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
 100
 101		if (hmac->ipad_opad) {
 102			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
 103					  hmac->ipad_opad,
 104					  hmac->ipad_opad_dma_addr);
 105			dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
 106				&hmac->ipad_opad_dma_addr);
 107			hmac->ipad_opad_dma_addr = 0;
 108			hmac->ipad_opad = NULL;
 109		}
 110		if (hmac->padded_authkey) {
 111			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
 112					  hmac->padded_authkey,
 113					  hmac->padded_authkey_dma_addr);
 114			dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
 115				&hmac->padded_authkey_dma_addr);
 116			hmac->padded_authkey_dma_addr = 0;
 117			hmac->padded_authkey = NULL;
 118		}
 119	}
 120}
 121
 122static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
 123{
 124	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 125
 126	return cc_get_default_hash_len(ctx->drvdata);
 127}
 128
 129static int cc_aead_init(struct crypto_aead *tfm)
 130{
 131	struct aead_alg *alg = crypto_aead_alg(tfm);
 132	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 133	struct cc_crypto_alg *cc_alg =
 134			container_of(alg, struct cc_crypto_alg, aead_alg);
 135	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
 136
 137	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
 138		crypto_tfm_alg_name(&tfm->base));
 139
 140	/* Initialize modes in instance */
 141	ctx->cipher_mode = cc_alg->cipher_mode;
 142	ctx->flow_mode = cc_alg->flow_mode;
 143	ctx->auth_mode = cc_alg->auth_mode;
 144	ctx->drvdata = cc_alg->drvdata;
 145	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));
 146
 147	/* Allocate key buffer, cache line aligned */
 148	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
 149					 &ctx->enckey_dma_addr, GFP_KERNEL);
 150	if (!ctx->enckey) {
 151		dev_err(dev, "Failed allocating key buffer\n");
 152		goto init_failed;
 153	}
 154	dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
 155		ctx->enckey);
 156
 157	/* Set default authlen value */
 158
 159	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */
 160		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
 161		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;
 162
 163		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
 164		/* (and temporary for user key - up to 256b) */
 165		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
 166						     &xcbc->xcbc_keys_dma_addr,
 167						     GFP_KERNEL);
 168		if (!xcbc->xcbc_keys) {
 169			dev_err(dev, "Failed allocating buffer for XCBC keys\n");
 170			goto init_failed;
 171		}
 172	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
 173		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
 174		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
 175		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;
 176
 177		/* Allocate dma-coherent buffer for IPAD + OPAD */
 178		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
 179						     &hmac->ipad_opad_dma_addr,
 180						     GFP_KERNEL);
 181
 182		if (!hmac->ipad_opad) {
 183			dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
 184			goto init_failed;
 185		}
 186
 187		dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
 188			hmac->ipad_opad);
 189
 190		hmac->padded_authkey = dma_alloc_coherent(dev,
 191							  MAX_HMAC_BLOCK_SIZE,
 192							  pkey_dma,
 193							  GFP_KERNEL);
 194
 195		if (!hmac->padded_authkey) {
 196			dev_err(dev, "failed to allocate padded_authkey\n");
 197			goto init_failed;
 198		}
 199	} else {
 200		ctx->auth_state.hmac.ipad_opad = NULL;
 201		ctx->auth_state.hmac.padded_authkey = NULL;
 202	}
 203	ctx->hash_len = cc_get_aead_hash_len(tfm);
 204
 205	return 0;
 206
 207init_failed:
 208	cc_aead_exit(tfm);
 209	return -ENOMEM;
 210}
 211
 212static void cc_aead_complete(struct device *dev, void *cc_req, int err)
 213{
 214	struct aead_request *areq = (struct aead_request *)cc_req;
 215	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 216	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
 217	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 218
 219	/* BACKLOG notification */
 220	if (err == -EINPROGRESS)
 221		goto done;
 222
 223	cc_unmap_aead_request(dev, areq);
 224
 225	/* Restore ordinary iv pointer */
 226	areq->iv = areq_ctx->backup_iv;
 227
 228	if (err)
 229		goto done;
 230
 231	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
 232		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
 233			   ctx->authsize) != 0) {
 234			dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
 235				ctx->authsize, ctx->cipher_mode);
 236			/* In case of payload authentication failure, MUST NOT
 237			 * revealed the decrypted message --> zero its memory.
 238			 */
 239			sg_zero_buffer(areq->dst, sg_nents(areq->dst),
 240				       areq->cryptlen, 0);
 241			err = -EBADMSG;
 242		}
 243	/*ENCRYPT*/
 244	} else if (areq_ctx->is_icv_fragmented) {
 245		u32 skip = areq->cryptlen + areq_ctx->dst_offset;
 246
 247		cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl,
 248				   skip, (skip + ctx->authsize),
 249				   CC_SG_FROM_BUF);
 250	}
 251done:
 252	aead_request_complete(areq, err);
 253}
 254
 255static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
 256				struct cc_aead_ctx *ctx)
 257{
 258	/* Load the AES key */
 259	hw_desc_init(&desc[0]);
 260	/* We are using for the source/user key the same buffer
 261	 * as for the output keys, * because after this key loading it
 262	 * is not needed anymore
 263	 */
 264	set_din_type(&desc[0], DMA_DLLI,
 265		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
 266		     NS_BIT);
 267	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
 268	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
 269	set_key_size_aes(&desc[0], ctx->auth_keylen);
 270	set_flow_mode(&desc[0], S_DIN_to_AES);
 271	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);
 272
 273	hw_desc_init(&desc[1]);
 274	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
 275	set_flow_mode(&desc[1], DIN_AES_DOUT);
 276	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
 277		      AES_KEYSIZE_128, NS_BIT, 0);
 278
 279	hw_desc_init(&desc[2]);
 280	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
 281	set_flow_mode(&desc[2], DIN_AES_DOUT);
 282	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
 283					 + AES_KEYSIZE_128),
 284			      AES_KEYSIZE_128, NS_BIT, 0);
 285
 286	hw_desc_init(&desc[3]);
 287	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
 288	set_flow_mode(&desc[3], DIN_AES_DOUT);
 289	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
 290					  + 2 * AES_KEYSIZE_128),
 291			      AES_KEYSIZE_128, NS_BIT, 0);
 292
 293	return 4;
 294}
 295
 296static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
 
 297{
 298	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 299	unsigned int digest_ofs = 0;
 300	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 301			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 302	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
 303			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
 304	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
 305
 306	unsigned int idx = 0;
 307	int i;
 308
 309	/* calc derived HMAC key */
 310	for (i = 0; i < 2; i++) {
 311		/* Load hash initial state */
 312		hw_desc_init(&desc[idx]);
 313		set_cipher_mode(&desc[idx], hash_mode);
 314		set_din_sram(&desc[idx],
 315			     cc_larval_digest_addr(ctx->drvdata,
 316						   ctx->auth_mode),
 317			     digest_size);
 318		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 319		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 320		idx++;
 321
 322		/* Load the hash current length*/
 323		hw_desc_init(&desc[idx]);
 324		set_cipher_mode(&desc[idx], hash_mode);
 325		set_din_const(&desc[idx], 0, ctx->hash_len);
 326		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 327		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 328		idx++;
 329
 330		/* Prepare ipad key */
 331		hw_desc_init(&desc[idx]);
 332		set_xor_val(&desc[idx], hmac_pad_const[i]);
 333		set_cipher_mode(&desc[idx], hash_mode);
 334		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 335		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 336		idx++;
 337
 338		/* Perform HASH update */
 339		hw_desc_init(&desc[idx]);
 340		set_din_type(&desc[idx], DMA_DLLI,
 341			     hmac->padded_authkey_dma_addr,
 342			     SHA256_BLOCK_SIZE, NS_BIT);
 343		set_cipher_mode(&desc[idx], hash_mode);
 344		set_xor_active(&desc[idx]);
 345		set_flow_mode(&desc[idx], DIN_HASH);
 346		idx++;
 347
 348		/* Get the digset */
 349		hw_desc_init(&desc[idx]);
 350		set_cipher_mode(&desc[idx], hash_mode);
 351		set_dout_dlli(&desc[idx],
 352			      (hmac->ipad_opad_dma_addr + digest_ofs),
 353			      digest_size, NS_BIT, 0);
 354		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 355		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 356		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 357		idx++;
 358
 359		digest_ofs += digest_size;
 360	}
 361
 362	return idx;
 363}
 364
 365static int validate_keys_sizes(struct cc_aead_ctx *ctx)
 366{
 367	struct device *dev = drvdata_to_dev(ctx->drvdata);
 368
 369	dev_dbg(dev, "enc_keylen=%u  authkeylen=%u\n",
 370		ctx->enc_keylen, ctx->auth_keylen);
 371
 372	switch (ctx->auth_mode) {
 373	case DRV_HASH_SHA1:
 374	case DRV_HASH_SHA256:
 375		break;
 376	case DRV_HASH_XCBC_MAC:
 377		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
 378		    ctx->auth_keylen != AES_KEYSIZE_192 &&
 379		    ctx->auth_keylen != AES_KEYSIZE_256)
 380			return -ENOTSUPP;
 381		break;
 382	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */
 383		if (ctx->auth_keylen > 0)
 384			return -EINVAL;
 385		break;
 386	default:
 387		dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
 388		return -EINVAL;
 389	}
 390	/* Check cipher key size */
 391	if (ctx->flow_mode == S_DIN_to_DES) {
 392		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
 393			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
 394				ctx->enc_keylen);
 395			return -EINVAL;
 396		}
 397	} else { /* Default assumed to be AES ciphers */
 398		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
 399		    ctx->enc_keylen != AES_KEYSIZE_192 &&
 400		    ctx->enc_keylen != AES_KEYSIZE_256) {
 401			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
 402				ctx->enc_keylen);
 403			return -EINVAL;
 404		}
 405	}
 406
 407	return 0; /* All tests of keys sizes passed */
 408}
 409
 410/* This function prepers the user key so it can pass to the hmac processing
 411 * (copy to intenral buffer or hash in case of key longer than block
 412 */
 413static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
 414				 unsigned int keylen)
 415{
 416	dma_addr_t key_dma_addr = 0;
 417	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 418	struct device *dev = drvdata_to_dev(ctx->drvdata);
 419	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
 420	struct cc_crypto_req cc_req = {};
 421	unsigned int blocksize;
 422	unsigned int digestsize;
 423	unsigned int hashmode;
 424	unsigned int idx = 0;
 425	int rc = 0;
 426	u8 *key = NULL;
 427	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 428	dma_addr_t padded_authkey_dma_addr =
 429		ctx->auth_state.hmac.padded_authkey_dma_addr;
 430
 431	switch (ctx->auth_mode) { /* auth_key required and >0 */
 432	case DRV_HASH_SHA1:
 433		blocksize = SHA1_BLOCK_SIZE;
 434		digestsize = SHA1_DIGEST_SIZE;
 435		hashmode = DRV_HASH_HW_SHA1;
 436		break;
 437	case DRV_HASH_SHA256:
 438	default:
 439		blocksize = SHA256_BLOCK_SIZE;
 440		digestsize = SHA256_DIGEST_SIZE;
 441		hashmode = DRV_HASH_HW_SHA256;
 442	}
 443
 444	if (keylen != 0) {
 445
 446		key = kmemdup(authkey, keylen, GFP_KERNEL);
 447		if (!key)
 448			return -ENOMEM;
 449
 450		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
 451					      DMA_TO_DEVICE);
 452		if (dma_mapping_error(dev, key_dma_addr)) {
 453			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 454				key, keylen);
 455			kzfree(key);
 456			return -ENOMEM;
 457		}
 458		if (keylen > blocksize) {
 459			/* Load hash initial state */
 460			hw_desc_init(&desc[idx]);
 461			set_cipher_mode(&desc[idx], hashmode);
 
 
 462			set_din_sram(&desc[idx], larval_addr, digestsize);
 463			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 464			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 465			idx++;
 466
 467			/* Load the hash current length*/
 468			hw_desc_init(&desc[idx]);
 469			set_cipher_mode(&desc[idx], hashmode);
 470			set_din_const(&desc[idx], 0, ctx->hash_len);
 471			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 472			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 473			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 474			idx++;
 475
 476			hw_desc_init(&desc[idx]);
 477			set_din_type(&desc[idx], DMA_DLLI,
 478				     key_dma_addr, keylen, NS_BIT);
 479			set_flow_mode(&desc[idx], DIN_HASH);
 480			idx++;
 481
 482			/* Get hashed key */
 483			hw_desc_init(&desc[idx]);
 484			set_cipher_mode(&desc[idx], hashmode);
 485			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 486				      digestsize, NS_BIT, 0);
 487			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 488			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 489			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 490			set_cipher_config0(&desc[idx],
 491					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 492			idx++;
 493
 494			hw_desc_init(&desc[idx]);
 495			set_din_const(&desc[idx], 0, (blocksize - digestsize));
 496			set_flow_mode(&desc[idx], BYPASS);
 497			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
 498				      digestsize), (blocksize - digestsize),
 499				      NS_BIT, 0);
 500			idx++;
 501		} else {
 502			hw_desc_init(&desc[idx]);
 503			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
 504				     keylen, NS_BIT);
 505			set_flow_mode(&desc[idx], BYPASS);
 506			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 507				      keylen, NS_BIT, 0);
 508			idx++;
 509
 510			if ((blocksize - keylen) != 0) {
 511				hw_desc_init(&desc[idx]);
 512				set_din_const(&desc[idx], 0,
 513					      (blocksize - keylen));
 514				set_flow_mode(&desc[idx], BYPASS);
 515				set_dout_dlli(&desc[idx],
 516					      (padded_authkey_dma_addr +
 517					       keylen),
 518					      (blocksize - keylen), NS_BIT, 0);
 519				idx++;
 520			}
 521		}
 522	} else {
 523		hw_desc_init(&desc[idx]);
 524		set_din_const(&desc[idx], 0, (blocksize - keylen));
 525		set_flow_mode(&desc[idx], BYPASS);
 526		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
 527			      blocksize, NS_BIT, 0);
 528		idx++;
 529	}
 530
 531	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 532	if (rc)
 533		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 534
 535	if (key_dma_addr)
 536		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);
 537
 538	kzfree(key);
 539
 540	return rc;
 541}
 542
 543static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 544			  unsigned int keylen)
 545{
 546	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 547	struct cc_crypto_req cc_req = {};
 548	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
 549	unsigned int seq_len = 0;
 550	struct device *dev = drvdata_to_dev(ctx->drvdata);
 551	const u8 *enckey, *authkey;
 552	int rc;
 553
 554	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
 555		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);
 556
 557	/* STAT_PHASE_0: Init and sanity checks */
 558
 559	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
 560		struct crypto_authenc_keys keys;
 561
 562		rc = crypto_authenc_extractkeys(&keys, key, keylen);
 563		if (rc)
 564			goto badkey;
 565		enckey = keys.enckey;
 566		authkey = keys.authkey;
 567		ctx->enc_keylen = keys.enckeylen;
 568		ctx->auth_keylen = keys.authkeylen;
 569
 570		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
 571			/* the nonce is stored in bytes at end of key */
 572			rc = -EINVAL;
 573			if (ctx->enc_keylen <
 574			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
 575				goto badkey;
 576			/* Copy nonce from last 4 bytes in CTR key to
 577			 *  first 4 bytes in CTR IV
 578			 */
 579			memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
 580			       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
 581			/* Set CTR key size */
 582			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
 583		}
 584	} else { /* non-authenc - has just one key */
 585		enckey = key;
 586		authkey = NULL;
 587		ctx->enc_keylen = keylen;
 588		ctx->auth_keylen = 0;
 589	}
 590
 591	rc = validate_keys_sizes(ctx);
 592	if (rc)
 593		goto badkey;
 594
 595	/* STAT_PHASE_1: Copy key to ctx */
 596
 597	/* Get key material */
 598	memcpy(ctx->enckey, enckey, ctx->enc_keylen);
 599	if (ctx->enc_keylen == 24)
 600		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
 601	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 602		memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
 603		       ctx->auth_keylen);
 604	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
 605		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
 606		if (rc)
 607			goto badkey;
 608	}
 609
 610	/* STAT_PHASE_2: Create sequence */
 611
 612	switch (ctx->auth_mode) {
 613	case DRV_HASH_SHA1:
 614	case DRV_HASH_SHA256:
 615		seq_len = hmac_setkey(desc, ctx);
 616		break;
 617	case DRV_HASH_XCBC_MAC:
 618		seq_len = xcbc_setkey(desc, ctx);
 619		break;
 620	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
 621		break; /* No auth. key setup */
 622	default:
 623		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
 624		rc = -ENOTSUPP;
 625		goto badkey;
 626	}
 627
 628	/* STAT_PHASE_3: Submit sequence to HW */
 629
 630	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
 631		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
 632		if (rc) {
 633			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 634			goto setkey_error;
 635		}
 636	}
 637
 638	/* Update STAT_PHASE_3 */
 639	return rc;
 640
 641badkey:
 642	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 643
 644setkey_error:
 645	return rc;
 646}
 647
 648static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
 649			       unsigned int keylen)
 650{
 651	struct crypto_authenc_keys keys;
 652	int err;
 653
 654	err = crypto_authenc_extractkeys(&keys, key, keylen);
 655	if (unlikely(err))
 656		return err;
 657
 658	err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?:
 659	      cc_aead_setkey(aead, key, keylen);
 660
 661	memzero_explicit(&keys, sizeof(keys));
 662	return err;
 663}
 664
 665static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
 666				 unsigned int keylen)
 667{
 668	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 669
 670	if (keylen < 3)
 671		return -EINVAL;
 672
 673	keylen -= 3;
 674	memcpy(ctx->ctr_nonce, key + keylen, 3);
 675
 676	return cc_aead_setkey(tfm, key, keylen);
 677}
 678
 679static int cc_aead_setauthsize(struct crypto_aead *authenc,
 680			       unsigned int authsize)
 681{
 682	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
 683	struct device *dev = drvdata_to_dev(ctx->drvdata);
 684
 685	/* Unsupported auth. sizes */
 686	if (authsize == 0 ||
 687	    authsize > crypto_aead_maxauthsize(authenc)) {
 688		return -ENOTSUPP;
 689	}
 690
 691	ctx->authsize = authsize;
 692	dev_dbg(dev, "authlen=%d\n", ctx->authsize);
 693
 694	return 0;
 695}
 696
 697static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
 698				      unsigned int authsize)
 699{
 700	switch (authsize) {
 701	case 8:
 702	case 12:
 703	case 16:
 704		break;
 705	default:
 706		return -EINVAL;
 707	}
 708
 709	return cc_aead_setauthsize(authenc, authsize);
 710}
 711
 712static int cc_ccm_setauthsize(struct crypto_aead *authenc,
 713			      unsigned int authsize)
 714{
 715	switch (authsize) {
 716	case 4:
 717	case 6:
 718	case 8:
 719	case 10:
 720	case 12:
 721	case 14:
 722	case 16:
 723		break;
 724	default:
 725		return -EINVAL;
 726	}
 727
 728	return cc_aead_setauthsize(authenc, authsize);
 729}
 730
 731static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
 732			      struct cc_hw_desc desc[], unsigned int *seq_size)
 733{
 734	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 735	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 736	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 737	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
 738	unsigned int idx = *seq_size;
 739	struct device *dev = drvdata_to_dev(ctx->drvdata);
 740
 741	switch (assoc_dma_type) {
 742	case CC_DMA_BUF_DLLI:
 743		dev_dbg(dev, "ASSOC buffer type DLLI\n");
 744		hw_desc_init(&desc[idx]);
 745		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
 746			     areq_ctx->assoclen, NS_BIT);
 747		set_flow_mode(&desc[idx], flow_mode);
 748		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
 749		    areq_ctx->cryptlen > 0)
 750			set_din_not_last_indication(&desc[idx]);
 751		break;
 752	case CC_DMA_BUF_MLLI:
 753		dev_dbg(dev, "ASSOC buffer type MLLI\n");
 754		hw_desc_init(&desc[idx]);
 755		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
 756			     areq_ctx->assoc.mlli_nents, NS_BIT);
 757		set_flow_mode(&desc[idx], flow_mode);
 758		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
 759		    areq_ctx->cryptlen > 0)
 760			set_din_not_last_indication(&desc[idx]);
 761		break;
 762	case CC_DMA_BUF_NULL:
 763	default:
 764		dev_err(dev, "Invalid ASSOC buffer type\n");
 765	}
 766
 767	*seq_size = (++idx);
 768}
 769
 770static void cc_proc_authen_desc(struct aead_request *areq,
 771				unsigned int flow_mode,
 772				struct cc_hw_desc desc[],
 773				unsigned int *seq_size, int direct)
 774{
 775	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 776	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
 777	unsigned int idx = *seq_size;
 778	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 779	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 780	struct device *dev = drvdata_to_dev(ctx->drvdata);
 781
 782	switch (data_dma_type) {
 783	case CC_DMA_BUF_DLLI:
 784	{
 785		struct scatterlist *cipher =
 786			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 787			areq_ctx->dst_sgl : areq_ctx->src_sgl;
 788
 789		unsigned int offset =
 790			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
 791			areq_ctx->dst_offset : areq_ctx->src_offset;
 792		dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
 793		hw_desc_init(&desc[idx]);
 794		set_din_type(&desc[idx], DMA_DLLI,
 795			     (sg_dma_address(cipher) + offset),
 796			     areq_ctx->cryptlen, NS_BIT);
 797		set_flow_mode(&desc[idx], flow_mode);
 798		break;
 799	}
 800	case CC_DMA_BUF_MLLI:
 801	{
 802		/* DOUBLE-PASS flow (the default):
 803		 * assoc. + iv + data are compacted into one MLLI table.
 804		 * If assoclen is zero, only the IV is processed.
 805		 */
 806		cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
 807		u32 mlli_nents = areq_ctx->assoc.mlli_nents;
 808
 809		if (areq_ctx->is_single_pass) {
 810			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 811				mlli_addr = areq_ctx->dst.sram_addr;
 812				mlli_nents = areq_ctx->dst.mlli_nents;
 813			} else {
 814				mlli_addr = areq_ctx->src.sram_addr;
 815				mlli_nents = areq_ctx->src.mlli_nents;
 816			}
 817		}
 818
 819		dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
 820		hw_desc_init(&desc[idx]);
 821		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
 822			     NS_BIT);
 823		set_flow_mode(&desc[idx], flow_mode);
 824		break;
 825	}
 826	case CC_DMA_BUF_NULL:
 827	default:
 828		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
 829	}
 830
 831	*seq_size = (++idx);
 832}
 833
 834static void cc_proc_cipher_desc(struct aead_request *areq,
 835				unsigned int flow_mode,
 836				struct cc_hw_desc desc[],
 837				unsigned int *seq_size)
 838{
 839	unsigned int idx = *seq_size;
 840	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
 841	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
 842	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
 843	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 844	struct device *dev = drvdata_to_dev(ctx->drvdata);
 845
 846	if (areq_ctx->cryptlen == 0)
 847		return; /*null processing*/
 848
 849	switch (data_dma_type) {
 850	case CC_DMA_BUF_DLLI:
 851		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
 852		hw_desc_init(&desc[idx]);
 853		set_din_type(&desc[idx], DMA_DLLI,
 854			     (sg_dma_address(areq_ctx->src_sgl) +
 855			      areq_ctx->src_offset), areq_ctx->cryptlen,
 856			      NS_BIT);
 857		set_dout_dlli(&desc[idx],
 858			      (sg_dma_address(areq_ctx->dst_sgl) +
 859			       areq_ctx->dst_offset),
 860			      areq_ctx->cryptlen, NS_BIT, 0);
 861		set_flow_mode(&desc[idx], flow_mode);
 862		break;
 863	case CC_DMA_BUF_MLLI:
 864		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
 865		hw_desc_init(&desc[idx]);
 866		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
 867			     areq_ctx->src.mlli_nents, NS_BIT);
 868		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
 869			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
 870		set_flow_mode(&desc[idx], flow_mode);
 871		break;
 872	case CC_DMA_BUF_NULL:
 873	default:
 874		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
 875	}
 876
 877	*seq_size = (++idx);
 878}
 879
 880static void cc_proc_digest_desc(struct aead_request *req,
 881				struct cc_hw_desc desc[],
 882				unsigned int *seq_size)
 883{
 884	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 885	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 886	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 887	unsigned int idx = *seq_size;
 888	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
 889				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
 890	int direct = req_ctx->gen_ctx.op_type;
 891
 892	/* Get final ICV result */
 893	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 894		hw_desc_init(&desc[idx]);
 895		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 896		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 897		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
 898			      NS_BIT, 1);
 899		set_queue_last_ind(ctx->drvdata, &desc[idx]);
 900		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 901			set_aes_not_hash_mode(&desc[idx]);
 902			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
 903		} else {
 904			set_cipher_config0(&desc[idx],
 905					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 906			set_cipher_mode(&desc[idx], hash_mode);
 907		}
 908	} else { /*Decrypt*/
 909		/* Get ICV out from hardware */
 910		hw_desc_init(&desc[idx]);
 911		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 912		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 913		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
 914			      ctx->authsize, NS_BIT, 1);
 915		set_queue_last_ind(ctx->drvdata, &desc[idx]);
 916		set_cipher_config0(&desc[idx],
 917				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 918		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 919		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
 920			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
 921			set_aes_not_hash_mode(&desc[idx]);
 922		} else {
 923			set_cipher_mode(&desc[idx], hash_mode);
 924		}
 925	}
 926
 927	*seq_size = (++idx);
 928}
 929
 930static void cc_set_cipher_desc(struct aead_request *req,
 931			       struct cc_hw_desc desc[],
 932			       unsigned int *seq_size)
 933{
 934	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 935	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
 936	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 937	unsigned int hw_iv_size = req_ctx->hw_iv_size;
 938	unsigned int idx = *seq_size;
 939	int direct = req_ctx->gen_ctx.op_type;
 940
 941	/* Setup cipher state */
 942	hw_desc_init(&desc[idx]);
 943	set_cipher_config0(&desc[idx], direct);
 944	set_flow_mode(&desc[idx], ctx->flow_mode);
 945	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
 946		     hw_iv_size, NS_BIT);
 947	if (ctx->cipher_mode == DRV_CIPHER_CTR)
 948		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 949	else
 950		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 951	set_cipher_mode(&desc[idx], ctx->cipher_mode);
 952	idx++;
 953
 954	/* Setup enc. key */
 955	hw_desc_init(&desc[idx]);
 956	set_cipher_config0(&desc[idx], direct);
 957	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 958	set_flow_mode(&desc[idx], ctx->flow_mode);
 959	if (ctx->flow_mode == S_DIN_to_AES) {
 960		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
 961			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
 962			      ctx->enc_keylen), NS_BIT);
 963		set_key_size_aes(&desc[idx], ctx->enc_keylen);
 964	} else {
 965		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
 966			     ctx->enc_keylen, NS_BIT);
 967		set_key_size_des(&desc[idx], ctx->enc_keylen);
 968	}
 969	set_cipher_mode(&desc[idx], ctx->cipher_mode);
 970	idx++;
 971
 972	*seq_size = idx;
 973}
 974
 975static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
 976			   unsigned int *seq_size, unsigned int data_flow_mode)
 977{
 978	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
 979	int direct = req_ctx->gen_ctx.op_type;
 980	unsigned int idx = *seq_size;
 981
 982	if (req_ctx->cryptlen == 0)
 983		return; /*null processing*/
 984
 985	cc_set_cipher_desc(req, desc, &idx);
 986	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
 987	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
 988		/* We must wait for DMA to write all cipher */
 989		hw_desc_init(&desc[idx]);
 990		set_din_no_dma(&desc[idx], 0, 0xfffff0);
 991		set_dout_no_dma(&desc[idx], 0, 0, 1);
 992		idx++;
 993	}
 994
 995	*seq_size = idx;
 996}
 997
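/*
 * Start the inner HMAC hash: load the precomputed (key XOR ipad) digest as
 * the initial hash state, then load the initial digest length from SRAM.
 */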
 998static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
 999			     unsigned int *seq_size)
1000{
1001	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1002	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1003	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1004				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1005	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1006				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1007	unsigned int idx = *seq_size;
1008
1009	/* Loading hash ipad xor key state */
1010	hw_desc_init(&desc[idx]);
1011	set_cipher_mode(&desc[idx], hash_mode);
1012	set_din_type(&desc[idx], DMA_DLLI,
1013		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
1014		     NS_BIT);
1015	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1016	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1017	idx++;
1018
1019	/* Load init. digest len (64 bytes) */
1020	hw_desc_init(&desc[idx]);
1021	set_cipher_mode(&desc[idx], hash_mode);
1022	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1023		     ctx->hash_len);
1024	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1025	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1026	idx++;
1027
1028	*seq_size = idx;
1029}
1030
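/*
 * Initialize AES-XCBC-MAC: load a zeroed MAC state, then the three derived
 * keys - K1 as the cipher key and K2/K3 into state registers 1 and 2.
 */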
1031static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
1032			     unsigned int *seq_size)
1033{
1034	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1035	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1036	unsigned int idx = *seq_size;
1037
1038	/* Loading MAC state */
1039	hw_desc_init(&desc[idx]);
1040	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
1041	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1042	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1043	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1044	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1045	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1046	set_aes_not_hash_mode(&desc[idx]);
1047	idx++;
1048
1049	/* Setup XCBC MAC K1 */
1050	hw_desc_init(&desc[idx]);
1051	set_din_type(&desc[idx], DMA_DLLI,
1052		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
1053		     AES_KEYSIZE_128, NS_BIT);
1054	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1055	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1056	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1057	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1058	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1059	set_aes_not_hash_mode(&desc[idx]);
1060	idx++;
1061
1062	/* Setup XCBC MAC K2 */
1063	hw_desc_init(&desc[idx]);
1064	set_din_type(&desc[idx], DMA_DLLI,
1065		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1066		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1067	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1068	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1069	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1070	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1071	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1072	set_aes_not_hash_mode(&desc[idx]);
1073	idx++;
1074
1075	/* Setup XCBC MAC K3 */
1076	hw_desc_init(&desc[idx]);
1077	set_din_type(&desc[idx], DMA_DLLI,
1078		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
1079		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
1080	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
1081	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
1082	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1083	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
1084	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1085	set_aes_not_hash_mode(&desc[idx]);
1086	idx++;
1087
1088	*seq_size = idx;
1089}
1090
1091static void cc_proc_header_desc(struct aead_request *req,
1092				struct cc_hw_desc desc[],
1093				unsigned int *seq_size)
1094{
1095	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1096	unsigned int idx = *seq_size;
1097
1098	/* Hash associated data */
1099	if (areq_ctx->assoclen > 0)
1100		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1101
1102	/* Hash IV */
1103	*seq_size = idx;
1104}
1105
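/*
 * HMAC outer pass: finish the inner hash (with padding), store its digest in
 * the SRAM workspace, load the (key XOR opad) digest and the initial digest
 * length, then hash the inner digest to produce the final HMAC.
 */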
1106static void cc_proc_scheme_desc(struct aead_request *req,
1107				struct cc_hw_desc desc[],
1108				unsigned int *seq_size)
1109{
1110	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1111	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1112	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
1113	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
1114				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
1115	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
1116				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
1117	unsigned int idx = *seq_size;
1118
1119	hw_desc_init(&desc[idx]);
1120	set_cipher_mode(&desc[idx], hash_mode);
1121	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1122		      ctx->hash_len);
1123	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1124	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
1125	set_cipher_do(&desc[idx], DO_PAD);
1126	idx++;
1127
1128	/* Get final ICV result */
1129	hw_desc_init(&desc[idx]);
1130	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
1131		      digest_size);
1132	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1133	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1134	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1135	set_cipher_mode(&desc[idx], hash_mode);
1136	idx++;
1137
1138	/* Loading hash opad xor key state */
1139	hw_desc_init(&desc[idx]);
1140	set_cipher_mode(&desc[idx], hash_mode);
1141	set_din_type(&desc[idx], DMA_DLLI,
1142		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
1143		     digest_size, NS_BIT);
1144	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1145	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1146	idx++;
1147
1148	/* Load init. digest len (64 bytes) */
1149	hw_desc_init(&desc[idx]);
1150	set_cipher_mode(&desc[idx], hash_mode);
1151	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
1152		     ctx->hash_len);
1153	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1154	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1155	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1156	idx++;
1157
1158	/* Perform HASH update */
1159	hw_desc_init(&desc[idx]);
1160	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
1161		     digest_size);
1162	set_flow_mode(&desc[idx], DIN_HASH);
1163	idx++;
1164
1165	*seq_size = idx;
1166}
1167
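/*
 * If the associated data or the payload is described by an MLLI table (or the
 * flow is double-pass), copy the table from host memory into SRAM using a
 * BYPASS descriptor so the following descriptors can reference it there.
 */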
1168static void cc_mlli_to_sram(struct aead_request *req,
1169			    struct cc_hw_desc desc[], unsigned int *seq_size)
1170{
1171	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1172	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1173	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1174	struct device *dev = drvdata_to_dev(ctx->drvdata);
1175
1176	if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
1177	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
1178	    !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
1179		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
1180			(unsigned int)ctx->drvdata->mlli_sram_addr,
1181			req_ctx->mlli_params.mlli_len);
1182		/* Copy MLLI table host-to-sram */
1183		hw_desc_init(&desc[*seq_size]);
1184		set_din_type(&desc[*seq_size], DMA_DLLI,
1185			     req_ctx->mlli_params.mlli_dma_addr,
1186			     req_ctx->mlli_params.mlli_len, NS_BIT);
1187		set_dout_sram(&desc[*seq_size],
1188			      ctx->drvdata->mlli_sram_addr,
1189			      req_ctx->mlli_params.mlli_len);
1190		set_flow_mode(&desc[*seq_size], BYPASS);
1191		(*seq_size)++;
1192	}
1193}
1194
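/*
 * Select the data flow for the bulk pass: in single-pass flows the cipher and
 * hash engines are chained (on encrypt the cipher output also feeds the hash,
 * on decrypt the ciphertext feeds cipher and hash in parallel), while the
 * double-pass fallback uses a plain DIN->cipher->DOUT flow.
 */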
1195static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
1196					  enum cc_flow_mode setup_flow_mode,
1197					  bool is_single_pass)
1198{
1199	enum cc_flow_mode data_flow_mode;
1200
1201	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1202		if (setup_flow_mode == S_DIN_to_AES)
1203			data_flow_mode = is_single_pass ?
1204				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
1205		else
1206			data_flow_mode = is_single_pass ?
1207				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
1208	} else { /* Decrypt */
1209		if (setup_flow_mode == S_DIN_to_AES)
1210			data_flow_mode = is_single_pass ?
1211				AES_and_HASH : DIN_AES_DOUT;
1212		else
1213			data_flow_mode = is_single_pass ?
1214				DES_and_HASH : DIN_DES_DOUT;
1215	}
1216
1217	return data_flow_mode;
1218}
1219
1220static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1221			    unsigned int *seq_size)
1222{
1223	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1224	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1225	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1226	int direct = req_ctx->gen_ctx.op_type;
1227	unsigned int data_flow_mode =
1228		cc_get_data_flow(direct, ctx->flow_mode,
1229				 req_ctx->is_single_pass);
1230
1231	if (req_ctx->is_single_pass) {
1232		/*
1233		 * Single-pass flow
1234		 */
1235		cc_set_hmac_desc(req, desc, seq_size);
1236		cc_set_cipher_desc(req, desc, seq_size);
1237		cc_proc_header_desc(req, desc, seq_size);
1238		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1239		cc_proc_scheme_desc(req, desc, seq_size);
1240		cc_proc_digest_desc(req, desc, seq_size);
1241		return;
1242	}
1243
1244	/*
1245	 * Double-pass flow.
1246	 * Fallback for unsupported single-pass modes,
1247	 * e.g. when the assoc. data length is not a multiple of a word.
1248	 */
1249	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1250		/* encrypt first.. */
1251		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1252		/* authenc after..*/
1253		cc_set_hmac_desc(req, desc, seq_size);
1254		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1255		cc_proc_scheme_desc(req, desc, seq_size);
1256		cc_proc_digest_desc(req, desc, seq_size);
1257
1258	} else { /*DECRYPT*/
1259		/* authenc first..*/
1260		cc_set_hmac_desc(req, desc, seq_size);
1261		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1262		cc_proc_scheme_desc(req, desc, seq_size);
1263		/* decrypt after.. */
1264		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1265		/* Read the digest result with the completion bit set;
1266		 * this must come after the cipher operation.
1267		 */
1268		cc_proc_digest_desc(req, desc, seq_size);
1269	}
1270}
1271
1272static void
1273cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
1274		unsigned int *seq_size)
1275{
1276	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1277	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1278	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1279	int direct = req_ctx->gen_ctx.op_type;
1280	unsigned int data_flow_mode =
1281		cc_get_data_flow(direct, ctx->flow_mode,
1282				 req_ctx->is_single_pass);
1283
1284	if (req_ctx->is_single_pass) {
1285		/*
1286		 * Single-pass flow
1287		 */
1288		cc_set_xcbc_desc(req, desc, seq_size);
1289		cc_set_cipher_desc(req, desc, seq_size);
1290		cc_proc_header_desc(req, desc, seq_size);
1291		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
1292		cc_proc_digest_desc(req, desc, seq_size);
1293		return;
1294	}
1295
1296	/*
1297	 * Double-pass flow.
1298	 * Fallback for unsupported single-pass modes,
1299	 * e.g. when the assoc. data length is not a multiple of a word.
1300	 */
1301	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
1302		/* encrypt first.. */
1303		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1304		/* authenc after.. */
1305		cc_set_xcbc_desc(req, desc, seq_size);
1306		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1307		cc_proc_digest_desc(req, desc, seq_size);
1308	} else { /*DECRYPT*/
1309		/* authenc first.. */
1310		cc_set_xcbc_desc(req, desc, seq_size);
1311		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
1312		/* decrypt after..*/
1313		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
1314		/* Read the digest result with the completion bit set;
1315		 * this must come after the cipher operation.
1316		 */
1317		cc_proc_digest_desc(req, desc, seq_size);
1318	}
1319}
1320
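/*
 * Validate the request lengths against the selected cipher mode and decide
 * whether the fast single-pass flow can be used; fall back to the double-pass
 * flow when the associated data or payload lengths are not suitably aligned.
 */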
1321static int validate_data_size(struct cc_aead_ctx *ctx,
1322			      enum drv_crypto_direction direct,
1323			      struct aead_request *req)
1324{
1325	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1326	struct device *dev = drvdata_to_dev(ctx->drvdata);
1327	unsigned int assoclen = areq_ctx->assoclen;
1328	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
1329			(req->cryptlen - ctx->authsize) : req->cryptlen;
1330
1331	if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
1332	    req->cryptlen < ctx->authsize)
1333		goto data_size_err;
1334
1335	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/
1336
1337	switch (ctx->flow_mode) {
1338	case S_DIN_to_AES:
1339		if (ctx->cipher_mode == DRV_CIPHER_CBC &&
1340		    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
1341			goto data_size_err;
1342		if (ctx->cipher_mode == DRV_CIPHER_CCM)
1343			break;
1344		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1345			if (areq_ctx->plaintext_authenticate_only)
1346				areq_ctx->is_single_pass = false;
1347			break;
1348		}
1349
1350		if (!IS_ALIGNED(assoclen, sizeof(u32)))
1351			areq_ctx->is_single_pass = false;
1352
1353		if (ctx->cipher_mode == DRV_CIPHER_CTR &&
1354		    !IS_ALIGNED(cipherlen, sizeof(u32)))
1355			areq_ctx->is_single_pass = false;
1356
1357		break;
1358	case S_DIN_to_DES:
1359		if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
1360			goto data_size_err;
1361		if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
1362			areq_ctx->is_single_pass = false;
1363		break;
1364	default:
1365		dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
1366		goto data_size_err;
1367	}
1368
1369	return 0;
1370
1371data_size_err:
1372	return -EINVAL;
1373}
1374
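/*
 * Encode the associated-data length field that prefixes the AAD in CCM:
 * a 2-byte big-endian length for values below 0xFF00, otherwise the 0xFF 0xFE
 * marker followed by a 4-byte big-endian length (RFC 3610 / SP 800-38C).
 * Returns the number of bytes written.
 */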
1375static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
1376{
1377	unsigned int len = 0;
1378
1379	if (header_size == 0)
1380		return 0;
1381
1382	if (header_size < ((1UL << 16) - (1UL << 8))) {
1383		len = 2;
1384
1385		pa0_buff[0] = (header_size >> 8) & 0xFF;
1386		pa0_buff[1] = header_size & 0xFF;
1387	} else {
1388		len = 6;
1389
1390		pa0_buff[0] = 0xFF;
1391		pa0_buff[1] = 0xFE;
1392		pa0_buff[2] = (header_size >> 24) & 0xFF;
1393		pa0_buff[3] = (header_size >> 16) & 0xFF;
1394		pa0_buff[4] = (header_size >> 8) & 0xFF;
1395		pa0_buff[5] = header_size & 0xFF;
1396	}
1397
1398	return len;
1399}
1400
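/*
 * Write the message length, big-endian, into the last @csize bytes of the
 * CCM B0 block (clearing the field first); fail if the length does not fit.
 */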
1401static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
1402{
1403	__be32 data;
1404
1405	memset(block, 0, csize);
1406	block += csize;
1407
1408	if (csize >= 4)
1409		csize = 4;
1410	else if (msglen > (1 << (8 * csize)))
1411		return -EOVERFLOW;
1412
1413	data = cpu_to_be32(msglen);
1414	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
1415
1416	return 0;
1417}
1418
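/*
 * Build the CCM descriptor sequence: load the AES-CTR key and counter block,
 * load the same key and the running MAC state for AES-CBC-MAC, hash the
 * formatted header / associated data, process the payload, then encrypt the
 * interim CBC-MAC with the counter-0 block (ccm_iv0) to produce the final tag.
 */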
1419static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
1420		  unsigned int *seq_size)
1421{
1422	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1423	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1424	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1425	unsigned int idx = *seq_size;
1426	unsigned int cipher_flow_mode;
1427	dma_addr_t mac_result;
1428
1429	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1430		cipher_flow_mode = AES_to_HASH_and_DOUT;
1431		mac_result = req_ctx->mac_buf_dma_addr;
1432	} else { /* Encrypt */
1433		cipher_flow_mode = AES_and_HASH;
1434		mac_result = req_ctx->icv_dma_addr;
1435	}
1436
1437	/* load key */
1438	hw_desc_init(&desc[idx]);
1439	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1440	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1441		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1442		      ctx->enc_keylen), NS_BIT);
1443	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1444	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1445	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1446	set_flow_mode(&desc[idx], S_DIN_to_AES);
1447	idx++;
1448
1449	/* load ctr state */
1450	hw_desc_init(&desc[idx]);
1451	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1452	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1453	set_din_type(&desc[idx], DMA_DLLI,
1454		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
1455	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1456	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1457	set_flow_mode(&desc[idx], S_DIN_to_AES);
1458	idx++;
1459
1460	/* load MAC key */
1461	hw_desc_init(&desc[idx]);
1462	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1463	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1464		     ((ctx->enc_keylen == 24) ?  CC_AES_KEY_SIZE_MAX :
1465		      ctx->enc_keylen), NS_BIT);
1466	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1467	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1468	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1469	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1470	set_aes_not_hash_mode(&desc[idx]);
1471	idx++;
1472
1473	/* load MAC state */
1474	hw_desc_init(&desc[idx]);
1475	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1476	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1477	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1478		     AES_BLOCK_SIZE, NS_BIT);
1479	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1480	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1481	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1482	set_aes_not_hash_mode(&desc[idx]);
1483	idx++;
1484
1485	/* process assoc data */
1486	if (req_ctx->assoclen > 0) {
1487		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
1488	} else {
1489		hw_desc_init(&desc[idx]);
1490		set_din_type(&desc[idx], DMA_DLLI,
1491			     sg_dma_address(&req_ctx->ccm_adata_sg),
1492			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
1493		set_flow_mode(&desc[idx], DIN_HASH);
1494		idx++;
1495	}
1496
1497	/* process the cipher */
1498	if (req_ctx->cryptlen)
1499		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);
1500
1501	/* Read temporal MAC */
1502	hw_desc_init(&desc[idx]);
1503	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
1504	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
1505		      NS_BIT, 0);
1506	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1507	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
1508	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1509	set_aes_not_hash_mode(&desc[idx]);
1510	idx++;
1511
1512	/* load AES-CTR state (for last MAC calculation)*/
1513	hw_desc_init(&desc[idx]);
1514	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
1515	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1516	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
1517		     AES_BLOCK_SIZE, NS_BIT);
1518	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1519	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1520	set_flow_mode(&desc[idx], S_DIN_to_AES);
1521	idx++;
1522
1523	hw_desc_init(&desc[idx]);
1524	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1525	set_dout_no_dma(&desc[idx], 0, 0, 1);
1526	idx++;
1527
1528	/* encrypt the "T" value and store MAC in mac_state */
1529	hw_desc_init(&desc[idx]);
1530	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1531		     ctx->authsize, NS_BIT);
1532	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1533	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1534	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1535	idx++;
1536
1537	*seq_size = idx;
1538	return 0;
1539}
1540
1541static int config_ccm_adata(struct aead_request *req)
1542{
1543	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1544	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1545	struct device *dev = drvdata_to_dev(ctx->drvdata);
1546	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1547	//unsigned int size_of_a = 0, rem_a_size = 0;
1548	unsigned int lp = req->iv[0];
1549	/* Note: The code assumes that req->iv[0] already contains the value
1550	 * of L' of RFC 3610.
1551	 */
1552	unsigned int l = lp + 1;  /* This is L of RFC 3610. */
1553	unsigned int m = ctx->authsize;  /* This is M of RFC 3610. */
1554	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
1555	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
1556	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
1557	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1558				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1559				req->cryptlen :
1560				(req->cryptlen - ctx->authsize);
1561	int rc;
1562
1563	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1564	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);
1565
1566	/* taken from crypto/ccm.c */
1567	/* 2 <= L <= 8, so 1 <= L' <= 7. */
1568	if (l < 2 || l > 8) {
1569		dev_err(dev, "illegal iv value %X\n", req->iv[0]);
1570		return -EINVAL;
1571	}
1572	memcpy(b0, req->iv, AES_BLOCK_SIZE);
1573
1574	/* format control info per RFC 3610 and
1575	 * NIST Special Publication 800-38C
1576	 */
1577	*b0 |= (8 * ((m - 2) / 2));
1578	if (req_ctx->assoclen > 0)
1579		*b0 |= 64;  /* Enable bit 6 if Adata exists. */
1580
1581	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write the message length. */
1582	if (rc) {
1583		dev_err(dev, "message len overflow detected\n");
1584		return rc;
1585	}
1586	 /* END of "taken from crypto/ccm.c" */
1587
1588	/* l(a) - size of associated data. */
1589	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);
1590
1591	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
1592	req->iv[15] = 1;
1593
1594	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
1595	ctr_count_0[15] = 0;
1596
1597	return 0;
1598}
1599
1600static void cc_proc_rfc4309_ccm(struct aead_request *req)
1601{
1602	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1603	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1604	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1605
1606	/* L' */
1607	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
1608	/* For RFC 4309, always use 4 bytes for message length
1609	 * (at most 2^32-1 bytes).
1610	 */
1611	areq_ctx->ctr_iv[0] = 3;
1612
1613	/* In RFC 4309 there is an 11-byte nonce+IV part,
1614	 * which we build here.
1615	 */
1616	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
1617	       CCM_BLOCK_NONCE_SIZE);
1618	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
1619	       CCM_BLOCK_IV_SIZE);
1620	req->iv = areq_ctx->ctr_iv;
1621	areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
1622}
1623
1624static void cc_set_ghash_desc(struct aead_request *req,
1625			      struct cc_hw_desc desc[], unsigned int *seq_size)
1626{
1627	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1628	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1629	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1630	unsigned int idx = *seq_size;
1631
1632	/* load key to AES*/
1633	hw_desc_init(&desc[idx]);
1634	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1635	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1636	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1637		     ctx->enc_keylen, NS_BIT);
1638	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1639	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1640	set_flow_mode(&desc[idx], S_DIN_to_AES);
1641	idx++;
1642
1643	/* process one zero block to generate hkey */
1644	hw_desc_init(&desc[idx]);
1645	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1646	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
1647		      NS_BIT, 0);
1648	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1649	idx++;
1650
1651	/* Memory Barrier */
1652	hw_desc_init(&desc[idx]);
1653	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1654	set_dout_no_dma(&desc[idx], 0, 0, 1);
1655	idx++;
1656
1657	/* Load GHASH subkey */
1658	hw_desc_init(&desc[idx]);
1659	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
1660		     AES_BLOCK_SIZE, NS_BIT);
1661	set_dout_no_dma(&desc[idx], 0, 0, 1);
1662	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1663	set_aes_not_hash_mode(&desc[idx]);
1664	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1665	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1666	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1667	idx++;
1668
1669	/* Configure the hash engine to work with GHASH.
1670	 * Since it was not possible to extend the HASH submodes to add GHASH,
1671	 * the following command is necessary in order to
1672	 * select GHASH (according to the HW designers).
1673	 */
1674	hw_desc_init(&desc[idx]);
1675	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1676	set_dout_no_dma(&desc[idx], 0, 0, 1);
1677	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1678	set_aes_not_hash_mode(&desc[idx]);
1679	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1680	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
1681	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1682	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1683	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1684	idx++;
1685
1686	/* Load the GHASH initial STATE (which is 0); as for any hash,
1687	 * there is an initial state.
1688	 */
1689	hw_desc_init(&desc[idx]);
1690	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
1691	set_dout_no_dma(&desc[idx], 0, 0, 1);
1692	set_flow_mode(&desc[idx], S_DIN_to_HASH);
1693	set_aes_not_hash_mode(&desc[idx]);
1694	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1695	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
1696	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
1697	idx++;
1698
1699	*seq_size = idx;
1700}
1701
1702static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
1703			     unsigned int *seq_size)
1704{
1705	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1706	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1707	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1708	unsigned int idx = *seq_size;
1709
1710	/* load key to AES*/
1711	hw_desc_init(&desc[idx]);
1712	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1713	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1714	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
1715		     ctx->enc_keylen, NS_BIT);
1716	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1717	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1718	set_flow_mode(&desc[idx], S_DIN_to_AES);
1719	idx++;
1720
1721	if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
1722		/* load AES/CTR initial CTR value inc by 2*/
1723		hw_desc_init(&desc[idx]);
1724		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1725		set_key_size_aes(&desc[idx], ctx->enc_keylen);
1726		set_din_type(&desc[idx], DMA_DLLI,
1727			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
1728			     NS_BIT);
1729		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1730		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1731		set_flow_mode(&desc[idx], S_DIN_to_AES);
1732		idx++;
1733	}
1734
1735	*seq_size = idx;
1736}
1737
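/*
 * Finalize GCM: GHASH the len(A) || len(C) block, store the resulting GHASH
 * state, then GCTR-encrypt it with the counter-1 block to produce the tag.
 */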
1738static void cc_proc_gcm_result(struct aead_request *req,
1739			       struct cc_hw_desc desc[],
1740			       unsigned int *seq_size)
1741{
1742	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1743	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1744	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1745	dma_addr_t mac_result;
1746	unsigned int idx = *seq_size;
1747
1748	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1749		mac_result = req_ctx->mac_buf_dma_addr;
1750	} else { /* Encrypt */
1751		mac_result = req_ctx->icv_dma_addr;
1752	}
1753
1754	/* process(ghash) gcm_block_len */
1755	hw_desc_init(&desc[idx]);
1756	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
1757		     AES_BLOCK_SIZE, NS_BIT);
1758	set_flow_mode(&desc[idx], DIN_HASH);
1759	idx++;
1760
1761	/* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
1762	hw_desc_init(&desc[idx]);
1763	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
1764	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1765	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
1766		      NS_BIT, 0);
1767	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1768	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
1769	set_aes_not_hash_mode(&desc[idx]);
1770
1771	idx++;
1772
1773	/* load AES/CTR initial CTR value inc by 1*/
1774	hw_desc_init(&desc[idx]);
1775	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1776	set_key_size_aes(&desc[idx], ctx->enc_keylen);
1777	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
1778		     AES_BLOCK_SIZE, NS_BIT);
1779	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
1780	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
1781	set_flow_mode(&desc[idx], S_DIN_to_AES);
1782	idx++;
1783
1784	/* Memory Barrier */
1785	hw_desc_init(&desc[idx]);
1786	set_din_no_dma(&desc[idx], 0, 0xfffff0);
1787	set_dout_no_dma(&desc[idx], 0, 0, 1);
1788	idx++;
1789
1790	/* process GCTR on stored GHASH and store MAC in mac_state*/
1791	hw_desc_init(&desc[idx]);
1792	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
1793	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
1794		     AES_BLOCK_SIZE, NS_BIT);
1795	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
1796	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1797	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1798	idx++;
1799
1800	*seq_size = idx;
1801}
1802
1803static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
1804		  unsigned int *seq_size)
1805{
1806	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1807	unsigned int cipher_flow_mode;
1808
1809	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
1810		cipher_flow_mode = AES_and_HASH;
1811	} else { /* Encrypt */
1812		cipher_flow_mode = AES_to_HASH_and_DOUT;
1813	}
1814
1815	//In RFC 4543 there is no data to encrypt; just copy data from src to dst.
1816	if (req_ctx->plaintext_authenticate_only) {
1817		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
1818		cc_set_ghash_desc(req, desc, seq_size);
1819		/* process(ghash) assoc data */
1820		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1821		cc_set_gctr_desc(req, desc, seq_size);
1822		cc_proc_gcm_result(req, desc, seq_size);
1823		return 0;
1824	}
1825
1826	// for gcm and rfc4106.
1827	cc_set_ghash_desc(req, desc, seq_size);
1828	/* process(ghash) assoc data */
1829	if (req_ctx->assoclen > 0)
1830		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
1831	cc_set_gctr_desc(req, desc, seq_size);
1832	/* process(gctr+ghash) */
1833	if (req_ctx->cryptlen)
1834		cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
1835	cc_proc_gcm_result(req, desc, seq_size);
1836
1837	return 0;
1838}
1839
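/*
 * Prepare the GCM request context: clear the H-key and MAC buffers, derive
 * the counter blocks with counter values 2 and 1 from the IV, and build the
 * len(A) || len(C) block (for RFC 4543 all data is counted as AAD and
 * len(C) is zero).
 */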
1840static int config_gcm_context(struct aead_request *req)
1841{
1842	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1843	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1844	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
1845	struct device *dev = drvdata_to_dev(ctx->drvdata);
1846
1847	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
1848				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
1849				req->cryptlen :
1850				(req->cryptlen - ctx->authsize);
1851	__be32 counter = cpu_to_be32(2);
1852
1853	dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
1854		__func__, cryptlen, req_ctx->assoclen, ctx->authsize);
1855
1856	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);
1857
1858	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
1859
1860	memcpy(req->iv + 12, &counter, 4);
1861	memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);
1862
1863	counter = cpu_to_be32(1);
1864	memcpy(req->iv + 12, &counter, 4);
1865	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);
1866
1867	if (!req_ctx->plaintext_authenticate_only) {
1868		__be64 temp64;
1869
1870		temp64 = cpu_to_be64(req_ctx->assoclen * 8);
1871		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1872		temp64 = cpu_to_be64(cryptlen * 8);
1873		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1874	} else {
1875		/* rfc4543 => all data (AAD, IV, plaintext) is considered
1876		 * additional authenticated data, i.e. nothing is encrypted.
1877		 */
1878		__be64 temp64;
1879
1880		temp64 = cpu_to_be64((req_ctx->assoclen +
1881				      GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
1882		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
1883		temp64 = 0;
1884		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
1885	}
1886
1887	return 0;
1888}
1889
1890static void cc_proc_rfc4_gcm(struct aead_request *req)
1891{
1892	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1893	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1894	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1895
1896	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
1897	       ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
1898	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
1899	       GCM_BLOCK_RFC4_IV_SIZE);
1900	req->iv = areq_ctx->ctr_iv;
1901	areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
1902}
1903
1904static int cc_proc_aead(struct aead_request *req,
1905			enum drv_crypto_direction direct)
1906{
1907	int rc = 0;
1908	int seq_len = 0;
1909	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
1910	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1911	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
1912	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
1913	struct device *dev = drvdata_to_dev(ctx->drvdata);
1914	struct cc_crypto_req cc_req = {};
1915
1916	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
1917		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
1918		ctx, req, req->iv, sg_virt(req->src), req->src->offset,
1919		sg_virt(req->dst), req->dst->offset, req->cryptlen);
1920
1921	/* STAT_PHASE_0: Init and sanity checks */
1922
1923	/* Check data length according to mode */
1924	if (validate_data_size(ctx, direct, req)) {
1925		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
1926			req->cryptlen, areq_ctx->assoclen);
1927		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
1928		return -EINVAL;
1929	}
1930
1931	/* Setup request structure */
1932	cc_req.user_cb = (void *)cc_aead_complete;
1933	cc_req.user_arg = (void *)req;
1934
1935	/* Setup request context */
1936	areq_ctx->gen_ctx.op_type = direct;
1937	areq_ctx->req_authsize = ctx->authsize;
1938	areq_ctx->cipher_mode = ctx->cipher_mode;
1939
1940	/* STAT_PHASE_1: Map buffers */
1941
1942	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
1943		/* Build CTR IV - Copy nonce from last 4 bytes in
1944		 * CTR key to first 4 bytes in CTR IV
1945		 */
1946		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
1947		       CTR_RFC3686_NONCE_SIZE);
1948		memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv,
1949		       CTR_RFC3686_IV_SIZE);
1950		/* Initialize counter portion of counter block */
1951		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
1952			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
1953
1954		/* Replace with counter iv */
1955		req->iv = areq_ctx->ctr_iv;
1956		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
1957	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
1958		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
1959		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
1960		if (areq_ctx->ctr_iv != req->iv) {
1961			memcpy(areq_ctx->ctr_iv, req->iv,
1962			       crypto_aead_ivsize(tfm));
1963			req->iv = areq_ctx->ctr_iv;
1964		}
1965	} else {
1966		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
1967	}
1968
1969	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
1970		rc = config_ccm_adata(req);
1971		if (rc) {
1972			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!\n",
1973				rc);
1974			goto exit;
1975		}
1976	} else {
1977		areq_ctx->ccm_hdr_size = ccm_header_size_null;
1978	}
1979
1980	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
1981		rc = config_gcm_context(req);
1982		if (rc) {
1983			dev_dbg(dev, "config_gcm_context() returned with a failure %d!\n",
1984				rc);
1985			goto exit;
1986		}
1987	}
1988
1989	rc = cc_map_aead_request(ctx->drvdata, req);
1990	if (rc) {
1991		dev_err(dev, "map_request() failed\n");
1992		goto exit;
1993	}
1994
1995	/* STAT_PHASE_2: Create sequence */
1996
1997	/* Load MLLI tables to SRAM if necessary */
1998	cc_mlli_to_sram(req, desc, &seq_len);
1999
2000	/*TODO: move seq len by reference */
2001	switch (ctx->auth_mode) {
2002	case DRV_HASH_SHA1:
2003	case DRV_HASH_SHA256:
2004		cc_hmac_authenc(req, desc, &seq_len);
2005		break;
2006	case DRV_HASH_XCBC_MAC:
2007		cc_xcbc_authenc(req, desc, &seq_len);
2008		break;
2009	case DRV_HASH_NULL:
2010		if (ctx->cipher_mode == DRV_CIPHER_CCM)
2011			cc_ccm(req, desc, &seq_len);
2012		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
2013			cc_gcm(req, desc, &seq_len);
2014		break;
2015	default:
2016		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
2017		cc_unmap_aead_request(dev, req);
2018		rc = -ENOTSUPP;
2019		goto exit;
2020	}
2021
2022	/* STAT_PHASE_3: Lock HW and push sequence */
2023
2024	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);
2025
2026	if (rc != -EINPROGRESS && rc != -EBUSY) {
2027		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
2028		cc_unmap_aead_request(dev, req);
2029	}
2030
2031exit:
2032	return rc;
2033}
2034
2035static int cc_aead_encrypt(struct aead_request *req)
2036{
2037	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2038	int rc;
2039
2040	memset(areq_ctx, 0, sizeof(*areq_ctx));
2041
2042	/* No generated IV required */
2043	areq_ctx->backup_iv = req->iv;
2044	areq_ctx->assoclen = req->assoclen;
2045	areq_ctx->is_gcm4543 = false;
2046
2047	areq_ctx->plaintext_authenticate_only = false;
2048
2049	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2050	if (rc != -EINPROGRESS && rc != -EBUSY)
2051		req->iv = areq_ctx->backup_iv;
2052
2053	return rc;
2054}
2055
2056static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
2057{
2058	/* Very similar to cc_aead_encrypt() above. */
2059
2060	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2061	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2062	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2063	struct device *dev = drvdata_to_dev(ctx->drvdata);
2064	int rc = -EINVAL;
2065
2066	if (!valid_assoclen(req)) {
2067		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2068		goto out;
2069	}
2070
2071	memset(areq_ctx, 0, sizeof(*areq_ctx));
2072
2073	/* No generated IV required */
2074	areq_ctx->backup_iv = req->iv;
2075	areq_ctx->assoclen = req->assoclen;
2076	areq_ctx->is_gcm4543 = true;
2077
2078	cc_proc_rfc4309_ccm(req);
2079
2080	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2081	if (rc != -EINPROGRESS && rc != -EBUSY)
2082		req->iv = areq_ctx->backup_iv;
2083out:
2084	return rc;
2085}
2086
2087static int cc_aead_decrypt(struct aead_request *req)
2088{
2089	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2090	int rc;
2091
2092	memset(areq_ctx, 0, sizeof(*areq_ctx));
2093
2094	/* No generated IV required */
2095	areq_ctx->backup_iv = req->iv;
2096	areq_ctx->assoclen = req->assoclen;
2097	areq_ctx->is_gcm4543 = false;
2098
2099	areq_ctx->plaintext_authenticate_only = false;
2100
2101	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2102	if (rc != -EINPROGRESS && rc != -EBUSY)
2103		req->iv = areq_ctx->backup_iv;
2104
2105	return rc;
2106}
2107
2108static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
2109{
2110	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2111	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2112	struct device *dev = drvdata_to_dev(ctx->drvdata);
2113	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2114	int rc = -EINVAL;
2115
2116	if (!valid_assoclen(req)) {
2117		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2118		goto out;
2119	}
2120
2121	memset(areq_ctx, 0, sizeof(*areq_ctx));
2122
2123	/* No generated IV required */
2124	areq_ctx->backup_iv = req->iv;
2125	areq_ctx->assoclen = req->assoclen;
2126
2127	areq_ctx->is_gcm4543 = true;
2128	cc_proc_rfc4309_ccm(req);
2129
2130	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2131	if (rc != -EINPROGRESS && rc != -EBUSY)
2132		req->iv = areq_ctx->backup_iv;
2133
2134out:
2135	return rc;
2136}
2137
2138static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2139				 unsigned int keylen)
2140{
2141	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2142	struct device *dev = drvdata_to_dev(ctx->drvdata);
2143
2144	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2145
2146	if (keylen < 4)
2147		return -EINVAL;
2148
2149	keylen -= 4;
2150	memcpy(ctx->ctr_nonce, key + keylen, 4);
2151
2152	return cc_aead_setkey(tfm, key, keylen);
2153}
2154
2155static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
2156				 unsigned int keylen)
2157{
2158	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2159	struct device *dev = drvdata_to_dev(ctx->drvdata);
2160
2161	dev_dbg(dev, "%s()  keylen %d, key %p\n", __func__, keylen, key);
2162
2163	if (keylen < 4)
2164		return -EINVAL;
2165
2166	keylen -= 4;
2167	memcpy(ctx->ctr_nonce, key + keylen, 4);
2168
2169	return cc_aead_setkey(tfm, key, keylen);
2170}
2171
2172static int cc_gcm_setauthsize(struct crypto_aead *authenc,
2173			      unsigned int authsize)
2174{
2175	switch (authsize) {
2176	case 4:
2177	case 8:
2178	case 12:
2179	case 13:
2180	case 14:
2181	case 15:
2182	case 16:
2183		break;
2184	default:
2185		return -EINVAL;
2186	}
2187
2188	return cc_aead_setauthsize(authenc, authsize);
2189}
2190
2191static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
2192				      unsigned int authsize)
2193{
2194	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2195	struct device *dev = drvdata_to_dev(ctx->drvdata);
2196
2197	dev_dbg(dev, "authsize %d\n", authsize);
2198
2199	switch (authsize) {
2200	case 8:
2201	case 12:
2202	case 16:
2203		break;
2204	default:
2205		return -EINVAL;
2206	}
2207
2208	return cc_aead_setauthsize(authenc, authsize);
2209}
2210
2211static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
2212				      unsigned int authsize)
2213{
2214	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
2215	struct device *dev = drvdata_to_dev(ctx->drvdata);
2216
2217	dev_dbg(dev, "authsize %d\n", authsize);
2218
2219	if (authsize != 16)
2220		return -EINVAL;
2221
2222	return cc_aead_setauthsize(authenc, authsize);
2223}
2224
2225static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
2226{
2227	/* Very similar to cc_aead_encrypt() above. */
2228
2229	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2230	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2231	struct device *dev = drvdata_to_dev(ctx->drvdata);
2232	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2233	int rc = -EINVAL;
2234
2235	if (!valid_assoclen(req)) {
2236		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2237		goto out;
2238	}
2239
2240	memset(areq_ctx, 0, sizeof(*areq_ctx));
2241
2242	/* No generated IV required */
2243	areq_ctx->backup_iv = req->iv;
2244	areq_ctx->assoclen = req->assoclen;
2245	areq_ctx->plaintext_authenticate_only = false;
2246
2247	cc_proc_rfc4_gcm(req);
2248	areq_ctx->is_gcm4543 = true;
2249
2250	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2251	if (rc != -EINPROGRESS && rc != -EBUSY)
2252		req->iv = areq_ctx->backup_iv;
2253out:
2254	return rc;
2255}
2256
2257static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
2258{
2259	/* Very similar to cc_aead_encrypt() above. */
2260	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2261	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2262	struct device *dev = drvdata_to_dev(ctx->drvdata);
2263	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2264	int rc = -EINVAL;
2265
2266	if (!valid_assoclen(req)) {
2267		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2268		goto out;
2269	}
2270
2271	memset(areq_ctx, 0, sizeof(*areq_ctx));
2272
2273	//plaintext is not encrypted with rfc4543
2274	areq_ctx->plaintext_authenticate_only = true;
2275
2276	/* No generated IV required */
2277	areq_ctx->backup_iv = req->iv;
2278	areq_ctx->assoclen = req->assoclen;
2279
2280	cc_proc_rfc4_gcm(req);
2281	areq_ctx->is_gcm4543 = true;
2282
2283	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
2284	if (rc != -EINPROGRESS && rc != -EBUSY)
2285		req->iv = areq_ctx->backup_iv;
2286out:
2287	return rc;
2288}
2289
2290static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
2291{
2292	/* Very similar to cc_aead_decrypt() above. */
2293
2294	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2295	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2296	struct device *dev = drvdata_to_dev(ctx->drvdata);
2297	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2298	int rc = -EINVAL;
2299
2300	if (!valid_assoclen(req)) {
2301		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2302		goto out;
2303	}
2304
2305	memset(areq_ctx, 0, sizeof(*areq_ctx));
2306
2307	/* No generated IV required */
2308	areq_ctx->backup_iv = req->iv;
2309	areq_ctx->assoclen = req->assoclen;
2310	areq_ctx->plaintext_authenticate_only = false;
2311
2312	cc_proc_rfc4_gcm(req);
2313	areq_ctx->is_gcm4543 = true;
2314
2315	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2316	if (rc != -EINPROGRESS && rc != -EBUSY)
2317		req->iv = areq_ctx->backup_iv;
2318out:
2319	return rc;
2320}
2321
2322static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
2323{
2324	/* Very similar to cc_aead_decrypt() above. */
2325	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2326	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
2327	struct device *dev = drvdata_to_dev(ctx->drvdata);
2328	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
2329	int rc = -EINVAL;
2330
2331	if (!valid_assoclen(req)) {
2332		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
2333		goto out;
2334	}
2335
2336	memset(areq_ctx, 0, sizeof(*areq_ctx));
2337
2338	//plaintext is not decrypted with rfc4543
2339	areq_ctx->plaintext_authenticate_only = true;
2340
2341	/* No generated IV required */
2342	areq_ctx->backup_iv = req->iv;
2343	areq_ctx->assoclen = req->assoclen;
2344
2345	cc_proc_rfc4_gcm(req);
2346	areq_ctx->is_gcm4543 = true;
2347
2348	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
2349	if (rc != -EINPROGRESS && rc != -EBUSY)
2350		req->iv = areq_ctx->backup_iv;
2351out:
2352	return rc;
2353}
2354
2355/* aead alg */
2356static struct cc_alg_template aead_algs[] = {
2357	{
2358		.name = "authenc(hmac(sha1),cbc(aes))",
2359		.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
2360		.blocksize = AES_BLOCK_SIZE,
2361		.template_aead = {
2362			.setkey = cc_aead_setkey,
2363			.setauthsize = cc_aead_setauthsize,
2364			.encrypt = cc_aead_encrypt,
2365			.decrypt = cc_aead_decrypt,
2366			.init = cc_aead_init,
2367			.exit = cc_aead_exit,
2368			.ivsize = AES_BLOCK_SIZE,
2369			.maxauthsize = SHA1_DIGEST_SIZE,
2370		},
2371		.cipher_mode = DRV_CIPHER_CBC,
2372		.flow_mode = S_DIN_to_AES,
2373		.auth_mode = DRV_HASH_SHA1,
2374		.min_hw_rev = CC_HW_REV_630,
2375		.std_body = CC_STD_NIST,
2376	},
2377	{
2378		.name = "authenc(hmac(sha1),cbc(des3_ede))",
2379		.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
2380		.blocksize = DES3_EDE_BLOCK_SIZE,
2381		.template_aead = {
2382			.setkey = cc_des3_aead_setkey,
2383			.setauthsize = cc_aead_setauthsize,
2384			.encrypt = cc_aead_encrypt,
2385			.decrypt = cc_aead_decrypt,
2386			.init = cc_aead_init,
2387			.exit = cc_aead_exit,
2388			.ivsize = DES3_EDE_BLOCK_SIZE,
2389			.maxauthsize = SHA1_DIGEST_SIZE,
2390		},
2391		.cipher_mode = DRV_CIPHER_CBC,
2392		.flow_mode = S_DIN_to_DES,
2393		.auth_mode = DRV_HASH_SHA1,
2394		.min_hw_rev = CC_HW_REV_630,
2395		.std_body = CC_STD_NIST,
2396	},
2397	{
2398		.name = "authenc(hmac(sha256),cbc(aes))",
2399		.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
2400		.blocksize = AES_BLOCK_SIZE,
2401		.template_aead = {
2402			.setkey = cc_aead_setkey,
2403			.setauthsize = cc_aead_setauthsize,
2404			.encrypt = cc_aead_encrypt,
2405			.decrypt = cc_aead_decrypt,
2406			.init = cc_aead_init,
2407			.exit = cc_aead_exit,
2408			.ivsize = AES_BLOCK_SIZE,
2409			.maxauthsize = SHA256_DIGEST_SIZE,
2410		},
2411		.cipher_mode = DRV_CIPHER_CBC,
2412		.flow_mode = S_DIN_to_AES,
2413		.auth_mode = DRV_HASH_SHA256,
2414		.min_hw_rev = CC_HW_REV_630,
2415		.std_body = CC_STD_NIST,
2416	},
2417	{
2418		.name = "authenc(hmac(sha256),cbc(des3_ede))",
2419		.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
2420		.blocksize = DES3_EDE_BLOCK_SIZE,
2421		.template_aead = {
2422			.setkey = cc_des3_aead_setkey,
2423			.setauthsize = cc_aead_setauthsize,
2424			.encrypt = cc_aead_encrypt,
2425			.decrypt = cc_aead_decrypt,
2426			.init = cc_aead_init,
2427			.exit = cc_aead_exit,
2428			.ivsize = DES3_EDE_BLOCK_SIZE,
2429			.maxauthsize = SHA256_DIGEST_SIZE,
2430		},
2431		.cipher_mode = DRV_CIPHER_CBC,
2432		.flow_mode = S_DIN_to_DES,
2433		.auth_mode = DRV_HASH_SHA256,
2434		.min_hw_rev = CC_HW_REV_630,
2435		.std_body = CC_STD_NIST,
2436	},
2437	{
2438		.name = "authenc(xcbc(aes),cbc(aes))",
2439		.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
2440		.blocksize = AES_BLOCK_SIZE,
2441		.template_aead = {
2442			.setkey = cc_aead_setkey,
2443			.setauthsize = cc_aead_setauthsize,
2444			.encrypt = cc_aead_encrypt,
2445			.decrypt = cc_aead_decrypt,
2446			.init = cc_aead_init,
2447			.exit = cc_aead_exit,
2448			.ivsize = AES_BLOCK_SIZE,
2449			.maxauthsize = AES_BLOCK_SIZE,
2450		},
2451		.cipher_mode = DRV_CIPHER_CBC,
2452		.flow_mode = S_DIN_to_AES,
2453		.auth_mode = DRV_HASH_XCBC_MAC,
2454		.min_hw_rev = CC_HW_REV_630,
2455		.std_body = CC_STD_NIST,
2456	},
2457	{
2458		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
2459		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
2460		.blocksize = 1,
2461		.template_aead = {
2462			.setkey = cc_aead_setkey,
2463			.setauthsize = cc_aead_setauthsize,
2464			.encrypt = cc_aead_encrypt,
2465			.decrypt = cc_aead_decrypt,
2466			.init = cc_aead_init,
2467			.exit = cc_aead_exit,
2468			.ivsize = CTR_RFC3686_IV_SIZE,
2469			.maxauthsize = SHA1_DIGEST_SIZE,
2470		},
2471		.cipher_mode = DRV_CIPHER_CTR,
2472		.flow_mode = S_DIN_to_AES,
2473		.auth_mode = DRV_HASH_SHA1,
2474		.min_hw_rev = CC_HW_REV_630,
2475		.std_body = CC_STD_NIST,
2476	},
2477	{
2478		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
2479		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
2480		.blocksize = 1,
2481		.template_aead = {
2482			.setkey = cc_aead_setkey,
2483			.setauthsize = cc_aead_setauthsize,
2484			.encrypt = cc_aead_encrypt,
2485			.decrypt = cc_aead_decrypt,
2486			.init = cc_aead_init,
2487			.exit = cc_aead_exit,
2488			.ivsize = CTR_RFC3686_IV_SIZE,
2489			.maxauthsize = SHA256_DIGEST_SIZE,
2490		},
2491		.cipher_mode = DRV_CIPHER_CTR,
2492		.flow_mode = S_DIN_to_AES,
2493		.auth_mode = DRV_HASH_SHA256,
2494		.min_hw_rev = CC_HW_REV_630,
2495		.std_body = CC_STD_NIST,
2496	},
2497	{
2498		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
2499		.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
2500		.blocksize = 1,
2501		.template_aead = {
2502			.setkey = cc_aead_setkey,
2503			.setauthsize = cc_aead_setauthsize,
2504			.encrypt = cc_aead_encrypt,
2505			.decrypt = cc_aead_decrypt,
2506			.init = cc_aead_init,
2507			.exit = cc_aead_exit,
2508			.ivsize = CTR_RFC3686_IV_SIZE,
2509			.maxauthsize = AES_BLOCK_SIZE,
2510		},
2511		.cipher_mode = DRV_CIPHER_CTR,
2512		.flow_mode = S_DIN_to_AES,
2513		.auth_mode = DRV_HASH_XCBC_MAC,
2514		.min_hw_rev = CC_HW_REV_630,
2515		.std_body = CC_STD_NIST,
2516	},
2517	{
2518		.name = "ccm(aes)",
2519		.driver_name = "ccm-aes-ccree",
2520		.blocksize = 1,
2521		.template_aead = {
2522			.setkey = cc_aead_setkey,
2523			.setauthsize = cc_ccm_setauthsize,
2524			.encrypt = cc_aead_encrypt,
2525			.decrypt = cc_aead_decrypt,
2526			.init = cc_aead_init,
2527			.exit = cc_aead_exit,
2528			.ivsize = AES_BLOCK_SIZE,
2529			.maxauthsize = AES_BLOCK_SIZE,
2530		},
2531		.cipher_mode = DRV_CIPHER_CCM,
2532		.flow_mode = S_DIN_to_AES,
2533		.auth_mode = DRV_HASH_NULL,
2534		.min_hw_rev = CC_HW_REV_630,
2535		.std_body = CC_STD_NIST,
2536	},
2537	{
2538		.name = "rfc4309(ccm(aes))",
2539		.driver_name = "rfc4309-ccm-aes-ccree",
2540		.blocksize = 1,
2541		.template_aead = {
2542			.setkey = cc_rfc4309_ccm_setkey,
2543			.setauthsize = cc_rfc4309_ccm_setauthsize,
2544			.encrypt = cc_rfc4309_ccm_encrypt,
2545			.decrypt = cc_rfc4309_ccm_decrypt,
2546			.init = cc_aead_init,
2547			.exit = cc_aead_exit,
2548			.ivsize = CCM_BLOCK_IV_SIZE,
2549			.maxauthsize = AES_BLOCK_SIZE,
2550		},
2551		.cipher_mode = DRV_CIPHER_CCM,
2552		.flow_mode = S_DIN_to_AES,
2553		.auth_mode = DRV_HASH_NULL,
2554		.min_hw_rev = CC_HW_REV_630,
2555		.std_body = CC_STD_NIST,
2556	},
2557	{
2558		.name = "gcm(aes)",
2559		.driver_name = "gcm-aes-ccree",
2560		.blocksize = 1,
2561		.template_aead = {
2562			.setkey = cc_aead_setkey,
2563			.setauthsize = cc_gcm_setauthsize,
2564			.encrypt = cc_aead_encrypt,
2565			.decrypt = cc_aead_decrypt,
2566			.init = cc_aead_init,
2567			.exit = cc_aead_exit,
2568			.ivsize = 12,
2569			.maxauthsize = AES_BLOCK_SIZE,
2570		},
2571		.cipher_mode = DRV_CIPHER_GCTR,
2572		.flow_mode = S_DIN_to_AES,
2573		.auth_mode = DRV_HASH_NULL,
2574		.min_hw_rev = CC_HW_REV_630,
2575		.std_body = CC_STD_NIST,
2576	},
2577	{
2578		.name = "rfc4106(gcm(aes))",
2579		.driver_name = "rfc4106-gcm-aes-ccree",
2580		.blocksize = 1,
2581		.template_aead = {
2582			.setkey = cc_rfc4106_gcm_setkey,
2583			.setauthsize = cc_rfc4106_gcm_setauthsize,
2584			.encrypt = cc_rfc4106_gcm_encrypt,
2585			.decrypt = cc_rfc4106_gcm_decrypt,
2586			.init = cc_aead_init,
2587			.exit = cc_aead_exit,
2588			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2589			.maxauthsize = AES_BLOCK_SIZE,
2590		},
2591		.cipher_mode = DRV_CIPHER_GCTR,
2592		.flow_mode = S_DIN_to_AES,
2593		.auth_mode = DRV_HASH_NULL,
2594		.min_hw_rev = CC_HW_REV_630,
2595		.std_body = CC_STD_NIST,
2596	},
2597	{
2598		.name = "rfc4543(gcm(aes))",
2599		.driver_name = "rfc4543-gcm-aes-ccree",
2600		.blocksize = 1,
2601		.template_aead = {
2602			.setkey = cc_rfc4543_gcm_setkey,
2603			.setauthsize = cc_rfc4543_gcm_setauthsize,
2604			.encrypt = cc_rfc4543_gcm_encrypt,
2605			.decrypt = cc_rfc4543_gcm_decrypt,
2606			.init = cc_aead_init,
2607			.exit = cc_aead_exit,
2608			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
2609			.maxauthsize = AES_BLOCK_SIZE,
2610		},
2611		.cipher_mode = DRV_CIPHER_GCTR,
2612		.flow_mode = S_DIN_to_AES,
2613		.auth_mode = DRV_HASH_NULL,
2614		.min_hw_rev = CC_HW_REV_630,
2615		.std_body = CC_STD_NIST,
2616	},
2617};
2618
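/*
 * cc_create_aead_alg() - build a registrable cc_crypto_alg from one entry of
 * aead_algs[]. Copies the aead_alg template, fills in the generic cra_*
 * fields and the ccree-specific cipher/flow/auth modes, and returns
 * ERR_PTR(-ENOMEM) if the wrapper cannot be allocated.
 */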
2619static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
2620						struct device *dev)
2621{
2622	struct cc_crypto_alg *t_alg;
2623	struct aead_alg *alg;
2624
2625	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
2626	if (!t_alg)
2627		return ERR_PTR(-ENOMEM);
2628
2629	alg = &tmpl->template_aead;
2630
2631	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
2632	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
2633		 tmpl->driver_name);
2634	alg->base.cra_module = THIS_MODULE;
2635	alg->base.cra_priority = CC_CRA_PRIO;
2636
2637	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
2638	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
2639	alg->init = cc_aead_init;
2640	alg->exit = cc_aead_exit;
2641
2642	t_alg->aead_alg = *alg;
2643
2644	t_alg->cipher_mode = tmpl->cipher_mode;
2645	t_alg->flow_mode = tmpl->flow_mode;
2646	t_alg->auth_mode = tmpl->auth_mode;
2647
2648	return t_alg;
2649}
2650
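/*
 * cc_aead_free() - unregister and free every AEAD algorithm previously
 * registered by cc_aead_alloc(), then release the AEAD handle itself.
 */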
2651int cc_aead_free(struct cc_drvdata *drvdata)
2652{
2653	struct cc_crypto_alg *t_alg, *n;
2654	struct cc_aead_handle *aead_handle =
2655		(struct cc_aead_handle *)drvdata->aead_handle;
2656
2657	if (aead_handle) {
2658		/* Remove registered algs */
2659		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
2660					 entry) {
2661			crypto_unregister_aead(&t_alg->aead_alg);
2662			list_del(&t_alg->entry);
2663			kfree(t_alg);
2664		}
2665		kfree(aead_handle);
2666		drvdata->aead_handle = NULL;
2667	}
2668
2669	return 0;
2670}
2671
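/*
 * cc_aead_alloc() - set up AEAD support for this device: reserve an SRAM
 * workspace of MAX_HMAC_DIGEST_SIZE bytes and register each entry of
 * aead_algs[] that matches the device's HW revision and enabled standards
 * body. On any failure, algorithms registered so far are torn down via
 * cc_aead_free().
 */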
2672int cc_aead_alloc(struct cc_drvdata *drvdata)
2673{
2674	struct cc_aead_handle *aead_handle;
2675	struct cc_crypto_alg *t_alg;
2676	int rc = -ENOMEM;
2677	int alg;
2678	struct device *dev = drvdata_to_dev(drvdata);
2679
2680	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
2681	if (!aead_handle) {
2682		rc = -ENOMEM;
2683		goto fail0;
2684	}
2685
2686	INIT_LIST_HEAD(&aead_handle->aead_list);
2687	drvdata->aead_handle = aead_handle;
2688
2689	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
2690							 MAX_HMAC_DIGEST_SIZE);
2691
2692	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
2693		dev_err(dev, "SRAM pool exhausted\n");
2694		rc = -ENOMEM;
2695		goto fail1;
2696	}
2697
2698	/* Register the supported AEAD algorithms with the Linux crypto API */
2699	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
2700		if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
2701		    !(drvdata->std_bodies & aead_algs[alg].std_body))
2702			continue;
2703
2704		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
2705		if (IS_ERR(t_alg)) {
2706			rc = PTR_ERR(t_alg);
2707			dev_err(dev, "%s alg allocation failed\n",
2708				aead_algs[alg].driver_name);
2709			goto fail1;
2710		}
2711		t_alg->drvdata = drvdata;
2712		rc = crypto_register_aead(&t_alg->aead_alg);
2713		if (rc) {
2714			dev_err(dev, "%s alg registration failed\n",
2715				t_alg->aead_alg.base.cra_driver_name);
2716			goto fail2;
2717		} else {
2718			list_add_tail(&t_alg->entry, &aead_handle->aead_list);
2719			dev_dbg(dev, "Registered %s\n",
2720				t_alg->aead_alg.base.cra_driver_name);
2721		}
2722	}
2723
2724	return 0;
2725
2726fail2:
2727	kfree(t_alg);
2728fail1:
2729	cc_aead_free(drvdata);
2730fail0:
2731	return rc;
2732}
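/*
 * Illustrative usage only (not part of this driver): once cc_aead_alloc() has
 * registered these algorithms, a kernel caller reaches the hardware
 * implementation through the generic AEAD API; the ccree variant (e.g.
 * "gcm-aes-ccree") is selected over software fallbacks by cra_priority.
 * A minimal sketch, with key material, request setup and error handling
 * elided:
 *
 *	struct crypto_aead *tfm;
 *
 *	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 16);
 *	... build an aead_request and call crypto_aead_encrypt() ...
 *	crypto_free_aead(tfm);
 */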