   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <crypto/algapi.h>
   7#include <crypto/hash.h>
   8#include <crypto/md5.h>
   9#include <crypto/sm3.h>
  10#include <crypto/internal/hash.h>
  11
  12#include "cc_driver.h"
  13#include "cc_request_mgr.h"
  14#include "cc_buffer_mgr.h"
  15#include "cc_hash.h"
  16#include "cc_sram_mgr.h"
  17
  18#define CC_MAX_HASH_SEQ_LEN 12
  19#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
  20#define CC_SM3_HASH_LEN_SIZE 8
  21
  22struct cc_hash_handle {
   23	u32 digest_len_sram_addr;	/* const value in SRAM */
  24	u32 larval_digest_sram_addr;   /* const value in SRAM */
  25	struct list_head hash_list;
  26};
  27
  28static const u32 cc_digest_len_init[] = {
  29	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
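/*
 * Note: the 0x40 (64) value here, and the 0x80 (128) value in
 * cc_digest_len_sha512_init below, appear to account for the one full
 * hash block (the ipad block) already consumed when the precomputed
 * HMAC ipad digest is restored in cc_init_req().
 */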
  30static const u32 cc_md5_init[] = {
  31	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
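/*
 * The MD5 initial value happens to equal the first four SHA-1 state words
 * (0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476), so the SHA1_H*
 * constants are reused for the MD5 larval digest above.
 */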
  32static const u32 cc_sha1_init[] = {
  33	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
  34static const u32 cc_sha224_init[] = {
  35	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
  36	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
  37static const u32 cc_sha256_init[] = {
  38	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
  39	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
  40static const u32 cc_digest_len_sha512_init[] = {
  41	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
  42
  43/*
  44 * Due to the way the HW works, every double word in the SHA384 and SHA512
  45 * larval hashes must be stored in hi/lo order
  46 */
  47#define hilo(x)	upper_32_bits(x), lower_32_bits(x)
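/*
 * For example, assuming the standard SHA-512 IV where
 * SHA512_H0 == 0x6a09e667f3bcc908ULL, hilo(SHA512_H0) expands to
 * 0x6a09e667, 0xf3bcc908.
 */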
  48static const u32 cc_sha384_init[] = {
  49	hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
  50	hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
  51static const u32 cc_sha512_init[] = {
  52	hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
  53	hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
  54
  55static const u32 cc_sm3_init[] = {
  56	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
  57	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
  58
  59static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
  60			  unsigned int *seq_size);
  61
  62static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
  63			  unsigned int *seq_size);
  64
  65static const void *cc_larval_digest(struct device *dev, u32 mode);
  66
  67struct cc_hash_alg {
  68	struct list_head entry;
  69	int hash_mode;
  70	int hw_mode;
  71	int inter_digestsize;
  72	struct cc_drvdata *drvdata;
  73	struct ahash_alg ahash_alg;
  74};
  75
  76struct hash_key_req_ctx {
  77	u32 keylen;
  78	dma_addr_t key_dma_addr;
  79	u8 *key;
  80};
  81
  82/* hash per-session context */
  83struct cc_hash_ctx {
  84	struct cc_drvdata *drvdata;
   85	/* holds the original digest: the digest after "setkey" if HMAC,
   86	 * or the initial digest if plain HASH.
   87	 */
  88	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
  89	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE]  ____cacheline_aligned;
  90
  91	dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
  92	dma_addr_t digest_buff_dma_addr;
   93	/* used for HMAC with a key larger than the mode's block size */
  94	struct hash_key_req_ctx key_params;
  95	int hash_mode;
  96	int hw_mode;
  97	int inter_digestsize;
  98	unsigned int hash_len;
  99	struct completion setkey_comp;
 100	bool is_hmac;
 101};
 102
 103static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
 104			unsigned int flow_mode, struct cc_hw_desc desc[],
 105			bool is_not_last_data, unsigned int *seq_size);
 106
 107static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
 108{
 109	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
 110	    mode == DRV_HASH_SHA512) {
 111		set_bytes_swap(desc, 1);
 112	} else {
 113		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 114	}
 115}
 116
 117static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
 118			 unsigned int digestsize)
 119{
 120	state->digest_result_dma_addr =
 121		dma_map_single(dev, state->digest_result_buff,
 122			       digestsize, DMA_BIDIRECTIONAL);
 123	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
 124		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
 125			digestsize);
 126		return -ENOMEM;
 127	}
 128	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
 129		digestsize, state->digest_result_buff,
 130		&state->digest_result_dma_addr);
 131
 132	return 0;
 133}
 134
 135static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
 136			struct cc_hash_ctx *ctx)
 137{
 138	bool is_hmac = ctx->is_hmac;
 139
 140	memset(state, 0, sizeof(*state));
 141
 142	if (is_hmac) {
 143		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
 144		    ctx->hw_mode != DRV_CIPHER_CMAC) {
 145			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
 146						ctx->inter_digestsize,
 147						DMA_BIDIRECTIONAL);
 148
 149			memcpy(state->digest_buff, ctx->digest_buff,
 150			       ctx->inter_digestsize);
 151			if (ctx->hash_mode == DRV_HASH_SHA512 ||
 152			    ctx->hash_mode == DRV_HASH_SHA384)
 153				memcpy(state->digest_bytes_len,
 154				       cc_digest_len_sha512_init,
 155				       ctx->hash_len);
 156			else
 157				memcpy(state->digest_bytes_len,
 158				       cc_digest_len_init,
 159				       ctx->hash_len);
 160		}
 161
 162		if (ctx->hash_mode != DRV_HASH_NULL) {
 163			dma_sync_single_for_cpu(dev,
 164						ctx->opad_tmp_keys_dma_addr,
 165						ctx->inter_digestsize,
 166						DMA_BIDIRECTIONAL);
 167			memcpy(state->opad_digest_buff,
 168			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
 169		}
 170	} else { /*hash*/
 171		/* Copy the initial digests if hash flow. */
 172		const void *larval = cc_larval_digest(dev, ctx->hash_mode);
 173
 174		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
 175	}
 176}
 177
 178static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
 179		      struct cc_hash_ctx *ctx)
 180{
 181	bool is_hmac = ctx->is_hmac;
 182
 183	state->digest_buff_dma_addr =
 184		dma_map_single(dev, state->digest_buff,
 185			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 186	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
  187		dev_err(dev, "Mapping digest buffer %d B at va=%pK for DMA failed\n",
 188			ctx->inter_digestsize, state->digest_buff);
 189		return -EINVAL;
 190	}
 191	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
 192		ctx->inter_digestsize, state->digest_buff,
 193		&state->digest_buff_dma_addr);
 194
 195	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
 196		state->digest_bytes_len_dma_addr =
 197			dma_map_single(dev, state->digest_bytes_len,
 198				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 199		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
 200			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
 201				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
 202			goto unmap_digest_buf;
 203		}
 204		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
 205			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
 206			&state->digest_bytes_len_dma_addr);
 207	}
 208
 209	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
 210		state->opad_digest_dma_addr =
 211			dma_map_single(dev, state->opad_digest_buff,
 212				       ctx->inter_digestsize,
 213				       DMA_BIDIRECTIONAL);
 214		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
 215			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
 216				ctx->inter_digestsize,
 217				state->opad_digest_buff);
 218			goto unmap_digest_len;
 219		}
 220		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
 221			ctx->inter_digestsize, state->opad_digest_buff,
 222			&state->opad_digest_dma_addr);
 223	}
 224
 225	return 0;
 226
 227unmap_digest_len:
 228	if (state->digest_bytes_len_dma_addr) {
 229		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
 230				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 231		state->digest_bytes_len_dma_addr = 0;
 232	}
 233unmap_digest_buf:
 234	if (state->digest_buff_dma_addr) {
 235		dma_unmap_single(dev, state->digest_buff_dma_addr,
 236				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 237		state->digest_buff_dma_addr = 0;
 238	}
 239
 240	return -EINVAL;
 241}
 242
 243static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
 244			 struct cc_hash_ctx *ctx)
 245{
 246	if (state->digest_buff_dma_addr) {
 247		dma_unmap_single(dev, state->digest_buff_dma_addr,
 248				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 249		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
 250			&state->digest_buff_dma_addr);
 251		state->digest_buff_dma_addr = 0;
 252	}
 253	if (state->digest_bytes_len_dma_addr) {
 254		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
 255				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 256		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
 257			&state->digest_bytes_len_dma_addr);
 258		state->digest_bytes_len_dma_addr = 0;
 259	}
 260	if (state->opad_digest_dma_addr) {
 261		dma_unmap_single(dev, state->opad_digest_dma_addr,
 262				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 263		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
 264			&state->opad_digest_dma_addr);
 265		state->opad_digest_dma_addr = 0;
 266	}
 267}
 268
 269static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
 270			    unsigned int digestsize, u8 *result)
 271{
 272	if (state->digest_result_dma_addr) {
 273		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
 274				 DMA_BIDIRECTIONAL);
  275		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
 276			state->digest_result_buff,
 277			&state->digest_result_dma_addr, digestsize);
 278		memcpy(result, state->digest_result_buff, digestsize);
 279	}
 280	state->digest_result_dma_addr = 0;
 281}
 282
 283static void cc_update_complete(struct device *dev, void *cc_req, int err)
 284{
 285	struct ahash_request *req = (struct ahash_request *)cc_req;
 286	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
 287	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 288	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 289
 290	dev_dbg(dev, "req=%pK\n", req);
 291
 292	if (err != -EINPROGRESS) {
 293		/* Not a BACKLOG notification */
 294		cc_unmap_hash_request(dev, state, req->src, false);
 295		cc_unmap_req(dev, state, ctx);
 296	}
 297
 298	ahash_request_complete(req, err);
 299}
 300
 301static void cc_digest_complete(struct device *dev, void *cc_req, int err)
 302{
 303	struct ahash_request *req = (struct ahash_request *)cc_req;
 304	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
 305	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 306	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 307	u32 digestsize = crypto_ahash_digestsize(tfm);
 308
 309	dev_dbg(dev, "req=%pK\n", req);
 310
 311	if (err != -EINPROGRESS) {
 312		/* Not a BACKLOG notification */
 313		cc_unmap_hash_request(dev, state, req->src, false);
 314		cc_unmap_result(dev, state, digestsize, req->result);
 315		cc_unmap_req(dev, state, ctx);
 316	}
 317
 318	ahash_request_complete(req, err);
 319}
 320
 321static void cc_hash_complete(struct device *dev, void *cc_req, int err)
 322{
 323	struct ahash_request *req = (struct ahash_request *)cc_req;
 324	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
 325	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 326	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 327	u32 digestsize = crypto_ahash_digestsize(tfm);
 328
 329	dev_dbg(dev, "req=%pK\n", req);
 330
 331	if (err != -EINPROGRESS) {
 332		/* Not a BACKLOG notification */
 333		cc_unmap_hash_request(dev, state, req->src, false);
 334		cc_unmap_result(dev, state, digestsize, req->result);
 335		cc_unmap_req(dev, state, ctx);
 336	}
 337
 338	ahash_request_complete(req, err);
 339}
 340
 341static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
 342			 int idx)
 343{
 344	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
 345	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 346	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 347	u32 digestsize = crypto_ahash_digestsize(tfm);
 348
 349	/* Get final MAC result */
 350	hw_desc_init(&desc[idx]);
 351	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 352	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
 353		      NS_BIT, 1);
 354	set_queue_last_ind(ctx->drvdata, &desc[idx]);
 355	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 356	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 357	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 358	cc_set_endianity(ctx->hash_mode, &desc[idx]);
 359	idx++;
 360
 361	return idx;
 362}
 363
 364static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
 365		       int idx)
 366{
 367	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
 368	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 369	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 370	u32 digestsize = crypto_ahash_digestsize(tfm);
 371
 372	/* store the hash digest result in the context */
 373	hw_desc_init(&desc[idx]);
 374	set_cipher_mode(&desc[idx], ctx->hw_mode);
 375	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
 376		      NS_BIT, 0);
 377	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 378	cc_set_endianity(ctx->hash_mode, &desc[idx]);
 379	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 380	idx++;
 381
 382	/* Loading hash opad xor key state */
 383	hw_desc_init(&desc[idx]);
 384	set_cipher_mode(&desc[idx], ctx->hw_mode);
 385	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
 386		     ctx->inter_digestsize, NS_BIT);
 387	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 388	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 389	idx++;
 390
 391	/* Load the hash current length */
 392	hw_desc_init(&desc[idx]);
 393	set_cipher_mode(&desc[idx], ctx->hw_mode);
 394	set_din_sram(&desc[idx],
 395		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
 396		     ctx->hash_len);
 397	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 398	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 399	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 400	idx++;
 401
 402	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
 403	hw_desc_init(&desc[idx]);
 404	set_din_no_dma(&desc[idx], 0, 0xfffff0);
 405	set_dout_no_dma(&desc[idx], 0, 0, 1);
 406	idx++;
 407
 408	/* Perform HASH update */
 409	hw_desc_init(&desc[idx]);
 410	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 411		     digestsize, NS_BIT);
 412	set_flow_mode(&desc[idx], DIN_HASH);
 413	idx++;
 414
 415	return idx;
 416}
 417
 418static int cc_hash_digest(struct ahash_request *req)
 419{
 420	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
 421	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 422	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 423	u32 digestsize = crypto_ahash_digestsize(tfm);
 424	struct scatterlist *src = req->src;
 425	unsigned int nbytes = req->nbytes;
 426	u8 *result = req->result;
 427	struct device *dev = drvdata_to_dev(ctx->drvdata);
 428	bool is_hmac = ctx->is_hmac;
 429	struct cc_crypto_req cc_req = {};
 430	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 431	u32 larval_digest_addr;
 432	int idx = 0;
 433	int rc = 0;
 434	gfp_t flags = cc_gfp_flags(&req->base);
 435
 436	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
 437		nbytes);
 438
 439	cc_init_req(dev, state, ctx);
 440
 441	if (cc_map_req(dev, state, ctx)) {
 442		dev_err(dev, "map_ahash_source() failed\n");
 443		return -ENOMEM;
 444	}
 445
 446	if (cc_map_result(dev, state, digestsize)) {
 447		dev_err(dev, "map_ahash_digest() failed\n");
 448		cc_unmap_req(dev, state, ctx);
 449		return -ENOMEM;
 450	}
 451
 452	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
 453				      flags)) {
 454		dev_err(dev, "map_ahash_request_final() failed\n");
 455		cc_unmap_result(dev, state, digestsize, result);
 456		cc_unmap_req(dev, state, ctx);
 457		return -ENOMEM;
 458	}
 459
 460	/* Setup request structure */
 461	cc_req.user_cb = cc_digest_complete;
 462	cc_req.user_arg = req;
 463
  464	/* If HMAC, load the hash IPAD xor key; if plain HASH, load the
  465	 * initial digest.
  466	 */
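	/*
	 * Per RFC 2104, HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m)).
	 * For HMAC, digest_buff already holds the precomputed inner (ipad)
	 * state produced by cc_hash_setkey(); the outer (opad) state is
	 * applied later by cc_fin_hmac().
	 */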
 467	hw_desc_init(&desc[idx]);
 468	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 469	if (is_hmac) {
 470		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 471			     ctx->inter_digestsize, NS_BIT);
 472	} else {
 473		larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
 474							   ctx->hash_mode);
 475		set_din_sram(&desc[idx], larval_digest_addr,
 476			     ctx->inter_digestsize);
 477	}
 478	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 479	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 480	idx++;
 481
 482	/* Load the hash current length */
 483	hw_desc_init(&desc[idx]);
 484	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 485
 486	if (is_hmac) {
 487		set_din_type(&desc[idx], DMA_DLLI,
 488			     state->digest_bytes_len_dma_addr,
 489			     ctx->hash_len, NS_BIT);
 490	} else {
 491		set_din_const(&desc[idx], 0, ctx->hash_len);
 492		if (nbytes)
 493			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 494		else
 495			set_cipher_do(&desc[idx], DO_PAD);
 496	}
 497	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 498	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 499	idx++;
 500
 501	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
 502
 503	if (is_hmac) {
 504		/* HW last hash block padding (aka. "DO_PAD") */
 505		hw_desc_init(&desc[idx]);
 506		set_cipher_mode(&desc[idx], ctx->hw_mode);
 507		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
 508			      ctx->hash_len, NS_BIT, 0);
 509		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 510		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 511		set_cipher_do(&desc[idx], DO_PAD);
 512		idx++;
 513
 514		idx = cc_fin_hmac(desc, req, idx);
 515	}
 516
 517	idx = cc_fin_result(desc, req, idx);
 518
 519	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 520	if (rc != -EINPROGRESS && rc != -EBUSY) {
 521		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 522		cc_unmap_hash_request(dev, state, src, true);
 523		cc_unmap_result(dev, state, digestsize, result);
 524		cc_unmap_req(dev, state, ctx);
 525	}
 526	return rc;
 527}
 528
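/*
 * cc_restore_hash() reloads the intermediate digest and the running byte
 * count saved in the request state, then queues the buffered/new data so
 * the HW resumes hashing exactly where the previous update() left off.
 */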
 529static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
 530			   struct ahash_req_ctx *state, unsigned int idx)
 531{
 532	/* Restore hash digest */
 533	hw_desc_init(&desc[idx]);
 534	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 535	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 536		     ctx->inter_digestsize, NS_BIT);
 537	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 538	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 539	idx++;
 540
 541	/* Restore hash current length */
 542	hw_desc_init(&desc[idx]);
 543	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 544	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 545	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
 546		     ctx->hash_len, NS_BIT);
 547	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 548	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 549	idx++;
 550
 551	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
 552
 553	return idx;
 554}
 555
 556static int cc_hash_update(struct ahash_request *req)
 557{
 558	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
 559	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 560	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 561	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
 562	struct scatterlist *src = req->src;
 563	unsigned int nbytes = req->nbytes;
 564	struct device *dev = drvdata_to_dev(ctx->drvdata);
 565	struct cc_crypto_req cc_req = {};
 566	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 567	u32 idx = 0;
 568	int rc;
 569	gfp_t flags = cc_gfp_flags(&req->base);
 570
 571	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
 572		"hmac" : "hash", nbytes);
 573
 574	if (nbytes == 0) {
 575		/* no real updates required */
 576		return 0;
 577	}
 578
 579	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
 580					block_size, flags);
 581	if (rc) {
 582		if (rc == 1) {
  583			dev_dbg(dev, " data size does not require HW update %x\n",
 584				nbytes);
 585			/* No hardware updates are required */
 586			return 0;
 587		}
 588		dev_err(dev, "map_ahash_request_update() failed\n");
 589		return -ENOMEM;
 590	}
 591
 592	if (cc_map_req(dev, state, ctx)) {
 593		dev_err(dev, "map_ahash_source() failed\n");
 594		cc_unmap_hash_request(dev, state, src, true);
 595		return -EINVAL;
 596	}
 597
 598	/* Setup request structure */
 599	cc_req.user_cb = cc_update_complete;
 600	cc_req.user_arg = req;
 601
 602	idx = cc_restore_hash(desc, ctx, state, idx);
 603
 604	/* store the hash digest result in context */
 605	hw_desc_init(&desc[idx]);
 606	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 607	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
 608		      ctx->inter_digestsize, NS_BIT, 0);
 609	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 610	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 611	idx++;
 612
 613	/* store current hash length in context */
 614	hw_desc_init(&desc[idx]);
 615	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 616	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
 617		      ctx->hash_len, NS_BIT, 1);
 618	set_queue_last_ind(ctx->drvdata, &desc[idx]);
 619	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 620	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 621	idx++;
 622
 623	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 624	if (rc != -EINPROGRESS && rc != -EBUSY) {
 625		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 626		cc_unmap_hash_request(dev, state, src, true);
 627		cc_unmap_req(dev, state, ctx);
 628	}
 629	return rc;
 630}
 631
 632static int cc_do_finup(struct ahash_request *req, bool update)
 633{
 634	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
 635	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 636	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 637	u32 digestsize = crypto_ahash_digestsize(tfm);
 638	struct scatterlist *src = req->src;
 639	unsigned int nbytes = req->nbytes;
 640	u8 *result = req->result;
 641	struct device *dev = drvdata_to_dev(ctx->drvdata);
 642	bool is_hmac = ctx->is_hmac;
 643	struct cc_crypto_req cc_req = {};
 644	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 645	unsigned int idx = 0;
 646	int rc;
 647	gfp_t flags = cc_gfp_flags(&req->base);
 648
 649	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
 650		update ? "finup" : "final", nbytes);
 651
 652	if (cc_map_req(dev, state, ctx)) {
 653		dev_err(dev, "map_ahash_source() failed\n");
 654		return -EINVAL;
 655	}
 656
 657	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
 658				      flags)) {
 659		dev_err(dev, "map_ahash_request_final() failed\n");
 660		cc_unmap_req(dev, state, ctx);
 661		return -ENOMEM;
 662	}
 663	if (cc_map_result(dev, state, digestsize)) {
 664		dev_err(dev, "map_ahash_digest() failed\n");
 665		cc_unmap_hash_request(dev, state, src, true);
 666		cc_unmap_req(dev, state, ctx);
 667		return -ENOMEM;
 668	}
 669
 670	/* Setup request structure */
 671	cc_req.user_cb = cc_hash_complete;
 672	cc_req.user_arg = req;
 673
 674	idx = cc_restore_hash(desc, ctx, state, idx);
 675
 676	/* Pad the hash */
 677	hw_desc_init(&desc[idx]);
 678	set_cipher_do(&desc[idx], DO_PAD);
 679	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 680	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
 681		      ctx->hash_len, NS_BIT, 0);
 682	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 683	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 684	idx++;
 685
 686	if (is_hmac)
 687		idx = cc_fin_hmac(desc, req, idx);
 688
 689	idx = cc_fin_result(desc, req, idx);
 690
 691	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 692	if (rc != -EINPROGRESS && rc != -EBUSY) {
 693		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 694		cc_unmap_hash_request(dev, state, src, true);
 695		cc_unmap_result(dev, state, digestsize, result);
 696		cc_unmap_req(dev, state, ctx);
 697	}
 698	return rc;
 699}
 700
 701static int cc_hash_finup(struct ahash_request *req)
 702{
 703	return cc_do_finup(req, true);
 704}
 705
 706
 707static int cc_hash_final(struct ahash_request *req)
 708{
 709	return cc_do_finup(req, false);
 710}
 711
 712static int cc_hash_init(struct ahash_request *req)
 713{
 714	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
 715	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 716	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
 717	struct device *dev = drvdata_to_dev(ctx->drvdata);
 718
 719	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
 720
 721	cc_init_req(dev, state, ctx);
 722
 723	return 0;
 724}
 725
 726static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
 727			  unsigned int keylen)
 728{
 729	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 730	struct cc_crypto_req cc_req = {};
 731	struct cc_hash_ctx *ctx = NULL;
 732	int blocksize = 0;
 733	int digestsize = 0;
 734	int i, idx = 0, rc = 0;
 735	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 736	u32 larval_addr;
 737	struct device *dev;
 738
 739	ctx = crypto_ahash_ctx_dma(ahash);
 740	dev = drvdata_to_dev(ctx->drvdata);
  741	dev_dbg(dev, "start keylen: %d\n", keylen);
 742
 743	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
 744	digestsize = crypto_ahash_digestsize(ahash);
 745
 746	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
 747
  748	/* A keylen of ZERO bytes indicates plain HASH; any NON-ZERO value
  749	 * uses the HMAC flow.
  750	 */
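	/*
	 * Key preprocessing below follows HMAC: a key longer than the block
	 * size is first hashed, and the result (or a shorter key) is
	 * zero-padded up to the block size in opad_tmp_keys_buff. A zero
	 * keylen simply leaves an all-zero block.
	 */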
 751	ctx->key_params.keylen = keylen;
 752	ctx->key_params.key_dma_addr = 0;
 753	ctx->is_hmac = true;
 754	ctx->key_params.key = NULL;
 755
 756	if (keylen) {
 757		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
 758		if (!ctx->key_params.key)
 759			return -ENOMEM;
 760
 761		ctx->key_params.key_dma_addr =
 762			dma_map_single(dev, ctx->key_params.key, keylen,
 763				       DMA_TO_DEVICE);
 764		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 765			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 766				ctx->key_params.key, keylen);
 767			kfree_sensitive(ctx->key_params.key);
 768			return -ENOMEM;
 769		}
 770		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
 771			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 772
 773		if (keylen > blocksize) {
 774			/* Load hash initial state */
 775			hw_desc_init(&desc[idx]);
 776			set_cipher_mode(&desc[idx], ctx->hw_mode);
 777			set_din_sram(&desc[idx], larval_addr,
 778				     ctx->inter_digestsize);
 779			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 780			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 781			idx++;
 782
 783			/* Load the hash current length*/
 784			hw_desc_init(&desc[idx]);
 785			set_cipher_mode(&desc[idx], ctx->hw_mode);
 786			set_din_const(&desc[idx], 0, ctx->hash_len);
 787			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 788			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 789			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 790			idx++;
 791
 792			hw_desc_init(&desc[idx]);
 793			set_din_type(&desc[idx], DMA_DLLI,
 794				     ctx->key_params.key_dma_addr, keylen,
 795				     NS_BIT);
 796			set_flow_mode(&desc[idx], DIN_HASH);
 797			idx++;
 798
 799			/* Get hashed key */
 800			hw_desc_init(&desc[idx]);
 801			set_cipher_mode(&desc[idx], ctx->hw_mode);
 802			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 803				      digestsize, NS_BIT, 0);
 804			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 805			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 806			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 807			cc_set_endianity(ctx->hash_mode, &desc[idx]);
 808			idx++;
 809
 810			hw_desc_init(&desc[idx]);
 811			set_din_const(&desc[idx], 0, (blocksize - digestsize));
 812			set_flow_mode(&desc[idx], BYPASS);
 813			set_dout_dlli(&desc[idx],
 814				      (ctx->opad_tmp_keys_dma_addr +
 815				       digestsize),
 816				      (blocksize - digestsize), NS_BIT, 0);
 817			idx++;
 818		} else {
 819			hw_desc_init(&desc[idx]);
 820			set_din_type(&desc[idx], DMA_DLLI,
 821				     ctx->key_params.key_dma_addr, keylen,
 822				     NS_BIT);
 823			set_flow_mode(&desc[idx], BYPASS);
 824			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 825				      keylen, NS_BIT, 0);
 826			idx++;
 827
 828			if ((blocksize - keylen)) {
 829				hw_desc_init(&desc[idx]);
 830				set_din_const(&desc[idx], 0,
 831					      (blocksize - keylen));
 832				set_flow_mode(&desc[idx], BYPASS);
 833				set_dout_dlli(&desc[idx],
 834					      (ctx->opad_tmp_keys_dma_addr +
 835					       keylen), (blocksize - keylen),
 836					      NS_BIT, 0);
 837				idx++;
 838			}
 839		}
 840	} else {
 841		hw_desc_init(&desc[idx]);
 842		set_din_const(&desc[idx], 0, blocksize);
 843		set_flow_mode(&desc[idx], BYPASS);
 844		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
 845			      blocksize, NS_BIT, 0);
 846		idx++;
 847	}
 848
 849	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 850	if (rc) {
 851		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 852		goto out;
 853	}
 854
 855	/* calc derived HMAC key */
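	/*
	 * Two passes over the padded key block: i == 0 XORs in
	 * HMAC_IPAD_CONST (the RFC 2104 0x36 ipad pattern) and hashes the
	 * result into digest_buff; i == 1 XORs in HMAC_OPAD_CONST (0x5c)
	 * and hashes into opad_tmp_keys_buff. These precomputed states are
	 * what cc_init_req() later copies into each request.
	 */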
 856	for (idx = 0, i = 0; i < 2; i++) {
 857		/* Load hash initial state */
 858		hw_desc_init(&desc[idx]);
 859		set_cipher_mode(&desc[idx], ctx->hw_mode);
 860		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
 861		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 862		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 863		idx++;
 864
 865		/* Load the hash current length*/
 866		hw_desc_init(&desc[idx]);
 867		set_cipher_mode(&desc[idx], ctx->hw_mode);
 868		set_din_const(&desc[idx], 0, ctx->hash_len);
 869		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 870		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 871		idx++;
 872
 873		/* Prepare ipad key */
 874		hw_desc_init(&desc[idx]);
 875		set_xor_val(&desc[idx], hmac_pad_const[i]);
 876		set_cipher_mode(&desc[idx], ctx->hw_mode);
 877		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 878		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 879		idx++;
 880
 881		/* Perform HASH update */
 882		hw_desc_init(&desc[idx]);
 883		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
 884			     blocksize, NS_BIT);
 885		set_cipher_mode(&desc[idx], ctx->hw_mode);
 886		set_xor_active(&desc[idx]);
 887		set_flow_mode(&desc[idx], DIN_HASH);
 888		idx++;
 889
 890		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
 891		 * of the first HASH "update" state)
 892		 */
 893		hw_desc_init(&desc[idx]);
 894		set_cipher_mode(&desc[idx], ctx->hw_mode);
 895		if (i > 0) /* Not first iteration */
 896			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 897				      ctx->inter_digestsize, NS_BIT, 0);
 898		else /* First iteration */
 899			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
 900				      ctx->inter_digestsize, NS_BIT, 0);
 901		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 902		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 903		idx++;
 904	}
 905
 906	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 907
 908out:
 909	if (ctx->key_params.key_dma_addr) {
 910		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
 911				 ctx->key_params.keylen, DMA_TO_DEVICE);
 912		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
 913			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 914	}
 915
 916	kfree_sensitive(ctx->key_params.key);
 917
 918	return rc;
 919}
 920
 921static int cc_xcbc_setkey(struct crypto_ahash *ahash,
 922			  const u8 *key, unsigned int keylen)
 923{
 924	struct cc_crypto_req cc_req = {};
 925	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
 926	struct device *dev = drvdata_to_dev(ctx->drvdata);
 927	int rc = 0;
 928	unsigned int idx = 0;
 929	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 930
 931	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
 932
 933	switch (keylen) {
 934	case AES_KEYSIZE_128:
 935	case AES_KEYSIZE_192:
 936	case AES_KEYSIZE_256:
 937		break;
 938	default:
 939		return -EINVAL;
 940	}
 941
 942	ctx->key_params.keylen = keylen;
 943
 944	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
 945	if (!ctx->key_params.key)
 946		return -ENOMEM;
 947
 948	ctx->key_params.key_dma_addr =
 949		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
 950	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 951		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 952			key, keylen);
 953		kfree_sensitive(ctx->key_params.key);
 954		return -ENOMEM;
 955	}
 956	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
 957		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 958
 959	ctx->is_hmac = true;
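	/*
	 * XCBC-MAC (RFC 3566) subkey derivation: K1 = E_K(0x01^16),
	 * K2 = E_K(0x02^16), K3 = E_K(0x03^16). The descriptors below
	 * encrypt those constants with the user key and store the results
	 * at XCBC_MAC_K1/K2/K3_OFFSET in opad_tmp_keys_buff.
	 */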
 960	/* 1. Load the AES key */
 961	hw_desc_init(&desc[idx]);
 962	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
 963		     keylen, NS_BIT);
 964	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
 965	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
 966	set_key_size_aes(&desc[idx], keylen);
 967	set_flow_mode(&desc[idx], S_DIN_to_AES);
 968	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 969	idx++;
 970
 971	hw_desc_init(&desc[idx]);
 972	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
 973	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 974	set_dout_dlli(&desc[idx],
 975		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
 976		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 977	idx++;
 978
 979	hw_desc_init(&desc[idx]);
 980	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
 981	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 982	set_dout_dlli(&desc[idx],
 983		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
 984		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 985	idx++;
 986
 987	hw_desc_init(&desc[idx]);
 988	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
 989	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 990	set_dout_dlli(&desc[idx],
 991		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
 992		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 993	idx++;
 994
 995	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 996
 997	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
 998			 ctx->key_params.keylen, DMA_TO_DEVICE);
 999	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1000		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1001
1002	kfree_sensitive(ctx->key_params.key);
1003
1004	return rc;
1005}
1006
1007static int cc_cmac_setkey(struct crypto_ahash *ahash,
1008			  const u8 *key, unsigned int keylen)
1009{
1010	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1011	struct device *dev = drvdata_to_dev(ctx->drvdata);
1012
1013	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1014
1015	ctx->is_hmac = true;
1016
1017	switch (keylen) {
1018	case AES_KEYSIZE_128:
1019	case AES_KEYSIZE_192:
1020	case AES_KEYSIZE_256:
1021		break;
1022	default:
1023		return -EINVAL;
1024	}
1025
1026	ctx->key_params.keylen = keylen;
1027
1028	/* STAT_PHASE_1: Copy key to ctx */
1029
1030	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1031				keylen, DMA_TO_DEVICE);
1032
1033	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
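	/*
	 * For AES-192 (24-byte) keys, the key is zero-padded up to the
	 * maximum AES key size; this is assumed to match the key_size that
	 * cc_mac_final() later programs for 24-byte keys.
	 */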
1034	if (keylen == 24) {
1035		memset(ctx->opad_tmp_keys_buff + 24, 0,
1036		       CC_AES_KEY_SIZE_MAX - 24);
1037	}
1038
1039	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1040				   keylen, DMA_TO_DEVICE);
1041
1042	ctx->key_params.keylen = keylen;
1043
1044	return 0;
1045}
1046
1047static void cc_free_ctx(struct cc_hash_ctx *ctx)
1048{
1049	struct device *dev = drvdata_to_dev(ctx->drvdata);
1050
1051	if (ctx->digest_buff_dma_addr) {
1052		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1053				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1054		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1055			&ctx->digest_buff_dma_addr);
1056		ctx->digest_buff_dma_addr = 0;
1057	}
1058	if (ctx->opad_tmp_keys_dma_addr) {
1059		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1060				 sizeof(ctx->opad_tmp_keys_buff),
1061				 DMA_BIDIRECTIONAL);
1062		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1063			&ctx->opad_tmp_keys_dma_addr);
1064		ctx->opad_tmp_keys_dma_addr = 0;
1065	}
1066
1067	ctx->key_params.keylen = 0;
1068}
1069
1070static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
1071{
1072	struct device *dev = drvdata_to_dev(ctx->drvdata);
1073
1074	ctx->key_params.keylen = 0;
1075
1076	ctx->digest_buff_dma_addr =
1077		dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
1078			       DMA_BIDIRECTIONAL);
1079	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1080		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1081			sizeof(ctx->digest_buff), ctx->digest_buff);
1082		goto fail;
1083	}
1084	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1085		sizeof(ctx->digest_buff), ctx->digest_buff,
1086		&ctx->digest_buff_dma_addr);
1087
1088	ctx->opad_tmp_keys_dma_addr =
1089		dma_map_single(dev, ctx->opad_tmp_keys_buff,
1090			       sizeof(ctx->opad_tmp_keys_buff),
1091			       DMA_BIDIRECTIONAL);
1092	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1093		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1094			sizeof(ctx->opad_tmp_keys_buff),
1095			ctx->opad_tmp_keys_buff);
1096		goto fail;
1097	}
1098	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1099		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1100		&ctx->opad_tmp_keys_dma_addr);
1101
1102	ctx->is_hmac = false;
1103	return 0;
1104
1105fail:
1106	cc_free_ctx(ctx);
1107	return -ENOMEM;
1108}
1109
1110static int cc_get_hash_len(struct crypto_tfm *tfm)
1111{
1112	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
1113
1114	if (ctx->hash_mode == DRV_HASH_SM3)
1115		return CC_SM3_HASH_LEN_SIZE;
1116	else
1117		return cc_get_default_hash_len(ctx->drvdata);
1118}
1119
1120static int cc_cra_init(struct crypto_tfm *tfm)
1121{
1122	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
1123	struct hash_alg_common *hash_alg_common =
1124		container_of(tfm->__crt_alg, struct hash_alg_common, base);
1125	struct ahash_alg *ahash_alg =
1126		container_of(hash_alg_common, struct ahash_alg, halg);
1127	struct cc_hash_alg *cc_alg =
1128			container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
1129
1130	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
1131				     sizeof(struct ahash_req_ctx));
1132
1133	ctx->hash_mode = cc_alg->hash_mode;
1134	ctx->hw_mode = cc_alg->hw_mode;
1135	ctx->inter_digestsize = cc_alg->inter_digestsize;
1136	ctx->drvdata = cc_alg->drvdata;
1137	ctx->hash_len = cc_get_hash_len(tfm);
1138	return cc_alloc_ctx(ctx);
1139}
1140
1141static void cc_cra_exit(struct crypto_tfm *tfm)
1142{
1143	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
1144	struct device *dev = drvdata_to_dev(ctx->drvdata);
1145
 1146	dev_dbg(dev, "cc_cra_exit\n");
1147	cc_free_ctx(ctx);
1148}
1149
1150static int cc_mac_update(struct ahash_request *req)
1151{
1152	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
1153	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1154	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
1155	struct device *dev = drvdata_to_dev(ctx->drvdata);
1156	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1157	struct cc_crypto_req cc_req = {};
1158	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1159	int rc;
1160	u32 idx = 0;
1161	gfp_t flags = cc_gfp_flags(&req->base);
1162
1163	if (req->nbytes == 0) {
1164		/* no real updates required */
1165		return 0;
1166	}
1167
1168	state->xcbc_count++;
1169
1170	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1171					req->nbytes, block_size, flags);
1172	if (rc) {
1173		if (rc == 1) {
 1174			dev_dbg(dev, " data size does not require HW update %x\n",
1175				req->nbytes);
1176			/* No hardware updates are required */
1177			return 0;
1178		}
1179		dev_err(dev, "map_ahash_request_update() failed\n");
1180		return -ENOMEM;
1181	}
1182
1183	if (cc_map_req(dev, state, ctx)) {
1184		dev_err(dev, "map_ahash_source() failed\n");
1185		return -EINVAL;
1186	}
1187
1188	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1189		cc_setup_xcbc(req, desc, &idx);
1190	else
1191		cc_setup_cmac(req, desc, &idx);
1192
1193	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1194
1195	/* store the hash digest result in context */
1196	hw_desc_init(&desc[idx]);
1197	set_cipher_mode(&desc[idx], ctx->hw_mode);
1198	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1199		      ctx->inter_digestsize, NS_BIT, 1);
1200	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1201	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1202	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1203	idx++;
1204
1205	/* Setup request structure */
1206	cc_req.user_cb = cc_update_complete;
1207	cc_req.user_arg = req;
1208
1209	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1210	if (rc != -EINPROGRESS && rc != -EBUSY) {
1211		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1212		cc_unmap_hash_request(dev, state, req->src, true);
1213		cc_unmap_req(dev, state, ctx);
1214	}
1215	return rc;
1216}
1217
1218static int cc_mac_final(struct ahash_request *req)
1219{
1220	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
1221	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1222	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
1223	struct device *dev = drvdata_to_dev(ctx->drvdata);
1224	struct cc_crypto_req cc_req = {};
1225	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1226	int idx = 0;
1227	int rc = 0;
1228	u32 key_size, key_len;
1229	u32 digestsize = crypto_ahash_digestsize(tfm);
1230	gfp_t flags = cc_gfp_flags(&req->base);
1231	u32 rem_cnt = *cc_hash_buf_cnt(state);
1232
1233	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1234		key_size = CC_AES_128_BIT_KEY_SIZE;
1235		key_len  = CC_AES_128_BIT_KEY_SIZE;
1236	} else {
1237		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1238			ctx->key_params.keylen;
1239		key_len =  ctx->key_params.keylen;
1240	}
1241
 1242	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
1243
1244	if (cc_map_req(dev, state, ctx)) {
1245		dev_err(dev, "map_ahash_source() failed\n");
1246		return -EINVAL;
1247	}
1248
1249	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1250				      req->nbytes, 0, flags)) {
1251		dev_err(dev, "map_ahash_request_final() failed\n");
1252		cc_unmap_req(dev, state, ctx);
1253		return -ENOMEM;
1254	}
1255
1256	if (cc_map_result(dev, state, digestsize)) {
1257		dev_err(dev, "map_ahash_digest() failed\n");
1258		cc_unmap_hash_request(dev, state, req->src, true);
1259		cc_unmap_req(dev, state, ctx);
1260		return -ENOMEM;
1261	}
1262
1263	/* Setup request structure */
1264	cc_req.user_cb = cc_hash_complete;
1265	cc_req.user_arg = req;
1266
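	/*
	 * If earlier updates ended exactly on a block boundary, the MAC
	 * state already contains the last block encrypted under K1; the
	 * descriptors below ECB-decrypt it back to (previous state XOR M[n])
	 * so that the final XCBC/CMAC block transformation can still be
	 * applied to the last block.
	 */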
1267	if (state->xcbc_count && rem_cnt == 0) {
1268		/* Load key for ECB decryption */
1269		hw_desc_init(&desc[idx]);
1270		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1271		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1272		set_din_type(&desc[idx], DMA_DLLI,
1273			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
1274			     key_size, NS_BIT);
1275		set_key_size_aes(&desc[idx], key_len);
1276		set_flow_mode(&desc[idx], S_DIN_to_AES);
1277		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1278		idx++;
1279
1280		/* Initiate decryption of block state to previous
1281		 * block_state-XOR-M[n]
1282		 */
1283		hw_desc_init(&desc[idx]);
1284		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1285			     CC_AES_BLOCK_SIZE, NS_BIT);
1286		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1287			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
1288		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1289		idx++;
1290
1291		/* Memory Barrier: wait for axi write to complete */
1292		hw_desc_init(&desc[idx]);
1293		set_din_no_dma(&desc[idx], 0, 0xfffff0);
1294		set_dout_no_dma(&desc[idx], 0, 0, 1);
1295		idx++;
1296	}
1297
1298	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1299		cc_setup_xcbc(req, desc, &idx);
1300	else
1301		cc_setup_cmac(req, desc, &idx);
1302
1303	if (state->xcbc_count == 0) {
1304		hw_desc_init(&desc[idx]);
1305		set_cipher_mode(&desc[idx], ctx->hw_mode);
1306		set_key_size_aes(&desc[idx], key_len);
1307		set_cmac_size0_mode(&desc[idx]);
1308		set_flow_mode(&desc[idx], S_DIN_to_AES);
1309		idx++;
1310	} else if (rem_cnt > 0) {
1311		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1312	} else {
1313		hw_desc_init(&desc[idx]);
1314		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1315		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1316		idx++;
1317	}
1318
1319	/* Get final MAC result */
1320	hw_desc_init(&desc[idx]);
1321	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1322		      digestsize, NS_BIT, 1);
1323	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1324	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1325	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1326	set_cipher_mode(&desc[idx], ctx->hw_mode);
1327	idx++;
1328
1329	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1330	if (rc != -EINPROGRESS && rc != -EBUSY) {
1331		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1332		cc_unmap_hash_request(dev, state, req->src, true);
1333		cc_unmap_result(dev, state, digestsize, req->result);
1334		cc_unmap_req(dev, state, ctx);
1335	}
1336	return rc;
1337}
1338
1339static int cc_mac_finup(struct ahash_request *req)
1340{
1341	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
1342	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1343	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
1344	struct device *dev = drvdata_to_dev(ctx->drvdata);
1345	struct cc_crypto_req cc_req = {};
1346	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1347	int idx = 0;
1348	int rc = 0;
1349	u32 key_len = 0;
1350	u32 digestsize = crypto_ahash_digestsize(tfm);
1351	gfp_t flags = cc_gfp_flags(&req->base);
1352
1353	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1354	if (state->xcbc_count > 0 && req->nbytes == 0) {
 1355		dev_dbg(dev, "No data to update. Calling cc_mac_final\n");
1356		return cc_mac_final(req);
1357	}
1358
1359	if (cc_map_req(dev, state, ctx)) {
1360		dev_err(dev, "map_ahash_source() failed\n");
1361		return -EINVAL;
1362	}
1363
1364	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1365				      req->nbytes, 1, flags)) {
1366		dev_err(dev, "map_ahash_request_final() failed\n");
1367		cc_unmap_req(dev, state, ctx);
1368		return -ENOMEM;
1369	}
1370	if (cc_map_result(dev, state, digestsize)) {
1371		dev_err(dev, "map_ahash_digest() failed\n");
1372		cc_unmap_hash_request(dev, state, req->src, true);
1373		cc_unmap_req(dev, state, ctx);
1374		return -ENOMEM;
1375	}
1376
1377	/* Setup request structure */
1378	cc_req.user_cb = cc_hash_complete;
1379	cc_req.user_arg = req;
1380
1381	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1382		key_len = CC_AES_128_BIT_KEY_SIZE;
1383		cc_setup_xcbc(req, desc, &idx);
1384	} else {
1385		key_len = ctx->key_params.keylen;
1386		cc_setup_cmac(req, desc, &idx);
1387	}
1388
1389	if (req->nbytes == 0) {
1390		hw_desc_init(&desc[idx]);
1391		set_cipher_mode(&desc[idx], ctx->hw_mode);
1392		set_key_size_aes(&desc[idx], key_len);
1393		set_cmac_size0_mode(&desc[idx]);
1394		set_flow_mode(&desc[idx], S_DIN_to_AES);
1395		idx++;
1396	} else {
1397		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1398	}
1399
1400	/* Get final MAC result */
1401	hw_desc_init(&desc[idx]);
1402	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1403		      digestsize, NS_BIT, 1);
1404	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1405	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1406	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1407	set_cipher_mode(&desc[idx], ctx->hw_mode);
1408	idx++;
1409
1410	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1411	if (rc != -EINPROGRESS && rc != -EBUSY) {
1412		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1413		cc_unmap_hash_request(dev, state, req->src, true);
1414		cc_unmap_result(dev, state, digestsize, req->result);
1415		cc_unmap_req(dev, state, ctx);
1416	}
1417	return rc;
1418}
1419
1420static int cc_mac_digest(struct ahash_request *req)
1421{
1422	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
1423	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1424	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
1425	struct device *dev = drvdata_to_dev(ctx->drvdata);
1426	u32 digestsize = crypto_ahash_digestsize(tfm);
1427	struct cc_crypto_req cc_req = {};
1428	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1429	u32 key_len;
1430	unsigned int idx = 0;
1431	int rc;
1432	gfp_t flags = cc_gfp_flags(&req->base);
1433
 1434	dev_dbg(dev, "===== mac-digest (%d) ====\n", req->nbytes);
1435
1436	cc_init_req(dev, state, ctx);
1437
1438	if (cc_map_req(dev, state, ctx)) {
1439		dev_err(dev, "map_ahash_source() failed\n");
1440		return -ENOMEM;
1441	}
1442	if (cc_map_result(dev, state, digestsize)) {
1443		dev_err(dev, "map_ahash_digest() failed\n");
1444		cc_unmap_req(dev, state, ctx);
1445		return -ENOMEM;
1446	}
1447
1448	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1449				      req->nbytes, 1, flags)) {
1450		dev_err(dev, "map_ahash_request_final() failed\n");
1451		cc_unmap_req(dev, state, ctx);
1452		return -ENOMEM;
1453	}
1454
1455	/* Setup request structure */
1456	cc_req.user_cb = cc_digest_complete;
1457	cc_req.user_arg = req;
1458
1459	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1460		key_len = CC_AES_128_BIT_KEY_SIZE;
1461		cc_setup_xcbc(req, desc, &idx);
1462	} else {
1463		key_len = ctx->key_params.keylen;
1464		cc_setup_cmac(req, desc, &idx);
1465	}
1466
1467	if (req->nbytes == 0) {
1468		hw_desc_init(&desc[idx]);
1469		set_cipher_mode(&desc[idx], ctx->hw_mode);
1470		set_key_size_aes(&desc[idx], key_len);
1471		set_cmac_size0_mode(&desc[idx]);
1472		set_flow_mode(&desc[idx], S_DIN_to_AES);
1473		idx++;
1474	} else {
1475		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1476	}
1477
1478	/* Get final MAC result */
1479	hw_desc_init(&desc[idx]);
1480	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1481		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
1482	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1483	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1484	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1485	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1486	set_cipher_mode(&desc[idx], ctx->hw_mode);
1487	idx++;
1488
1489	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1490	if (rc != -EINPROGRESS && rc != -EBUSY) {
1491		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1492		cc_unmap_hash_request(dev, state, req->src, true);
1493		cc_unmap_result(dev, state, digestsize, req->result);
1494		cc_unmap_req(dev, state, ctx);
1495	}
1496	return rc;
1497}
1498
1499static int cc_hash_export(struct ahash_request *req, void *out)
1500{
1501	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1502	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1503	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
1504	u8 *curr_buff = cc_hash_buf(state);
1505	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1506	const u32 tmp = CC_EXPORT_MAGIC;
1507
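	/*
	 * Exported state layout: CC_EXPORT_MAGIC (u32) | intermediate digest
	 * (inter_digestsize) | running byte count (hash_len) | pending buffer
	 * count (u32) | pending buffer bytes. cc_hash_import() consumes the
	 * same layout, and the total is bounded by CC_STATE_SIZE() used in
	 * the templates below.
	 */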
1508	memcpy(out, &tmp, sizeof(u32));
1509	out += sizeof(u32);
1510
1511	memcpy(out, state->digest_buff, ctx->inter_digestsize);
1512	out += ctx->inter_digestsize;
1513
1514	memcpy(out, state->digest_bytes_len, ctx->hash_len);
1515	out += ctx->hash_len;
1516
1517	memcpy(out, &curr_buff_cnt, sizeof(u32));
1518	out += sizeof(u32);
1519
1520	memcpy(out, curr_buff, curr_buff_cnt);
1521
1522	return 0;
1523}
1524
1525static int cc_hash_import(struct ahash_request *req, const void *in)
1526{
1527	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1528	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
1529	struct device *dev = drvdata_to_dev(ctx->drvdata);
1530	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
1531	u32 tmp;
1532
1533	memcpy(&tmp, in, sizeof(u32));
1534	if (tmp != CC_EXPORT_MAGIC)
1535		return -EINVAL;
1536	in += sizeof(u32);
1537
1538	cc_init_req(dev, state, ctx);
1539
1540	memcpy(state->digest_buff, in, ctx->inter_digestsize);
1541	in += ctx->inter_digestsize;
1542
1543	memcpy(state->digest_bytes_len, in, ctx->hash_len);
1544	in += ctx->hash_len;
1545
1546	/* Sanity check the data as much as possible */
1547	memcpy(&tmp, in, sizeof(u32));
1548	if (tmp > CC_MAX_HASH_BLCK_SIZE)
1549		return -EINVAL;
1550	in += sizeof(u32);
1551
1552	state->buf_cnt[0] = tmp;
1553	memcpy(state->buffers[0], in, tmp);
1554
1555	return 0;
1556}
1557
1558struct cc_hash_template {
1559	char name[CRYPTO_MAX_ALG_NAME];
1560	char driver_name[CRYPTO_MAX_ALG_NAME];
1561	char mac_name[CRYPTO_MAX_ALG_NAME];
1562	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1563	unsigned int blocksize;
1564	bool is_mac;
1565	bool synchronize;
1566	struct ahash_alg template_ahash;
1567	int hash_mode;
1568	int hw_mode;
1569	int inter_digestsize;
1570	struct cc_drvdata *drvdata;
1571	u32 min_hw_rev;
1572	enum cc_std_body std_body;
1573};
1574
1575#define CC_STATE_SIZE(_x) \
1576	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1577
1578/* hash descriptors */
1579static struct cc_hash_template driver_hash[] = {
 1580	// Asynchronous hash templates
1581	{
1582		.name = "sha1",
1583		.driver_name = "sha1-ccree",
1584		.mac_name = "hmac(sha1)",
1585		.mac_driver_name = "hmac-sha1-ccree",
1586		.blocksize = SHA1_BLOCK_SIZE,
1587		.is_mac = true,
1588		.synchronize = false,
1589		.template_ahash = {
1590			.init = cc_hash_init,
1591			.update = cc_hash_update,
1592			.final = cc_hash_final,
1593			.finup = cc_hash_finup,
1594			.digest = cc_hash_digest,
1595			.export = cc_hash_export,
1596			.import = cc_hash_import,
1597			.setkey = cc_hash_setkey,
1598			.halg = {
1599				.digestsize = SHA1_DIGEST_SIZE,
1600				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1601			},
1602		},
1603		.hash_mode = DRV_HASH_SHA1,
1604		.hw_mode = DRV_HASH_HW_SHA1,
1605		.inter_digestsize = SHA1_DIGEST_SIZE,
1606		.min_hw_rev = CC_HW_REV_630,
1607		.std_body = CC_STD_NIST,
1608	},
1609	{
1610		.name = "sha256",
1611		.driver_name = "sha256-ccree",
1612		.mac_name = "hmac(sha256)",
1613		.mac_driver_name = "hmac-sha256-ccree",
1614		.blocksize = SHA256_BLOCK_SIZE,
1615		.is_mac = true,
1616		.template_ahash = {
1617			.init = cc_hash_init,
1618			.update = cc_hash_update,
1619			.final = cc_hash_final,
1620			.finup = cc_hash_finup,
1621			.digest = cc_hash_digest,
1622			.export = cc_hash_export,
1623			.import = cc_hash_import,
1624			.setkey = cc_hash_setkey,
1625			.halg = {
1626				.digestsize = SHA256_DIGEST_SIZE,
1627				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1628			},
1629		},
1630		.hash_mode = DRV_HASH_SHA256,
1631		.hw_mode = DRV_HASH_HW_SHA256,
1632		.inter_digestsize = SHA256_DIGEST_SIZE,
1633		.min_hw_rev = CC_HW_REV_630,
1634		.std_body = CC_STD_NIST,
1635	},
1636	{
1637		.name = "sha224",
1638		.driver_name = "sha224-ccree",
1639		.mac_name = "hmac(sha224)",
1640		.mac_driver_name = "hmac-sha224-ccree",
1641		.blocksize = SHA224_BLOCK_SIZE,
1642		.is_mac = true,
1643		.template_ahash = {
1644			.init = cc_hash_init,
1645			.update = cc_hash_update,
1646			.final = cc_hash_final,
1647			.finup = cc_hash_finup,
1648			.digest = cc_hash_digest,
1649			.export = cc_hash_export,
1650			.import = cc_hash_import,
1651			.setkey = cc_hash_setkey,
1652			.halg = {
1653				.digestsize = SHA224_DIGEST_SIZE,
1654				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
1655			},
1656		},
1657		.hash_mode = DRV_HASH_SHA224,
1658		.hw_mode = DRV_HASH_HW_SHA256,
1659		.inter_digestsize = SHA256_DIGEST_SIZE,
1660		.min_hw_rev = CC_HW_REV_630,
1661		.std_body = CC_STD_NIST,
1662	},
1663	{
1664		.name = "sha384",
1665		.driver_name = "sha384-ccree",
1666		.mac_name = "hmac(sha384)",
1667		.mac_driver_name = "hmac-sha384-ccree",
1668		.blocksize = SHA384_BLOCK_SIZE,
1669		.is_mac = true,
1670		.template_ahash = {
1671			.init = cc_hash_init,
1672			.update = cc_hash_update,
1673			.final = cc_hash_final,
1674			.finup = cc_hash_finup,
1675			.digest = cc_hash_digest,
1676			.export = cc_hash_export,
1677			.import = cc_hash_import,
1678			.setkey = cc_hash_setkey,
1679			.halg = {
1680				.digestsize = SHA384_DIGEST_SIZE,
1681				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1682			},
1683		},
1684		.hash_mode = DRV_HASH_SHA384,
1685		.hw_mode = DRV_HASH_HW_SHA512,
1686		.inter_digestsize = SHA512_DIGEST_SIZE,
1687		.min_hw_rev = CC_HW_REV_712,
1688		.std_body = CC_STD_NIST,
1689	},
1690	{
1691		.name = "sha512",
1692		.driver_name = "sha512-ccree",
1693		.mac_name = "hmac(sha512)",
1694		.mac_driver_name = "hmac-sha512-ccree",
1695		.blocksize = SHA512_BLOCK_SIZE,
1696		.is_mac = true,
1697		.template_ahash = {
1698			.init = cc_hash_init,
1699			.update = cc_hash_update,
1700			.final = cc_hash_final,
1701			.finup = cc_hash_finup,
1702			.digest = cc_hash_digest,
1703			.export = cc_hash_export,
1704			.import = cc_hash_import,
1705			.setkey = cc_hash_setkey,
1706			.halg = {
1707				.digestsize = SHA512_DIGEST_SIZE,
1708				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1709			},
1710		},
1711		.hash_mode = DRV_HASH_SHA512,
1712		.hw_mode = DRV_HASH_HW_SHA512,
1713		.inter_digestsize = SHA512_DIGEST_SIZE,
1714		.min_hw_rev = CC_HW_REV_712,
1715		.std_body = CC_STD_NIST,
1716	},
1717	{
1718		.name = "md5",
1719		.driver_name = "md5-ccree",
1720		.mac_name = "hmac(md5)",
1721		.mac_driver_name = "hmac-md5-ccree",
1722		.blocksize = MD5_HMAC_BLOCK_SIZE,
1723		.is_mac = true,
1724		.template_ahash = {
1725			.init = cc_hash_init,
1726			.update = cc_hash_update,
1727			.final = cc_hash_final,
1728			.finup = cc_hash_finup,
1729			.digest = cc_hash_digest,
1730			.export = cc_hash_export,
1731			.import = cc_hash_import,
1732			.setkey = cc_hash_setkey,
1733			.halg = {
1734				.digestsize = MD5_DIGEST_SIZE,
1735				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1736			},
1737		},
1738		.hash_mode = DRV_HASH_MD5,
1739		.hw_mode = DRV_HASH_HW_MD5,
1740		.inter_digestsize = MD5_DIGEST_SIZE,
1741		.min_hw_rev = CC_HW_REV_630,
1742		.std_body = CC_STD_NIST,
1743	},
1744	{
1745		.name = "sm3",
1746		.driver_name = "sm3-ccree",
1747		.blocksize = SM3_BLOCK_SIZE,
1748		.is_mac = false,
1749		.template_ahash = {
1750			.init = cc_hash_init,
1751			.update = cc_hash_update,
1752			.final = cc_hash_final,
1753			.finup = cc_hash_finup,
1754			.digest = cc_hash_digest,
1755			.export = cc_hash_export,
1756			.import = cc_hash_import,
1757			.setkey = cc_hash_setkey,
1758			.halg = {
1759				.digestsize = SM3_DIGEST_SIZE,
1760				.statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
1761			},
1762		},
1763		.hash_mode = DRV_HASH_SM3,
1764		.hw_mode = DRV_HASH_HW_SM3,
1765		.inter_digestsize = SM3_DIGEST_SIZE,
1766		.min_hw_rev = CC_HW_REV_713,
1767		.std_body = CC_STD_OSCCA,
1768	},
1769	{
1770		.mac_name = "xcbc(aes)",
1771		.mac_driver_name = "xcbc-aes-ccree",
1772		.blocksize = AES_BLOCK_SIZE,
1773		.is_mac = true,
1774		.template_ahash = {
1775			.init = cc_hash_init,
1776			.update = cc_mac_update,
1777			.final = cc_mac_final,
1778			.finup = cc_mac_finup,
1779			.digest = cc_mac_digest,
1780			.setkey = cc_xcbc_setkey,
1781			.export = cc_hash_export,
1782			.import = cc_hash_import,
1783			.halg = {
1784				.digestsize = AES_BLOCK_SIZE,
1785				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1786			},
1787		},
1788		.hash_mode = DRV_HASH_NULL,
1789		.hw_mode = DRV_CIPHER_XCBC_MAC,
1790		.inter_digestsize = AES_BLOCK_SIZE,
1791		.min_hw_rev = CC_HW_REV_630,
1792		.std_body = CC_STD_NIST,
1793	},
1794	{
1795		.mac_name = "cmac(aes)",
1796		.mac_driver_name = "cmac-aes-ccree",
1797		.blocksize = AES_BLOCK_SIZE,
1798		.is_mac = true,
1799		.template_ahash = {
1800			.init = cc_hash_init,
1801			.update = cc_mac_update,
1802			.final = cc_mac_final,
1803			.finup = cc_mac_finup,
1804			.digest = cc_mac_digest,
1805			.setkey = cc_cmac_setkey,
1806			.export = cc_hash_export,
1807			.import = cc_hash_import,
1808			.halg = {
1809				.digestsize = AES_BLOCK_SIZE,
1810				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1811			},
1812		},
1813		.hash_mode = DRV_HASH_NULL,
1814		.hw_mode = DRV_CIPHER_CMAC,
1815		.inter_digestsize = AES_BLOCK_SIZE,
1816		.min_hw_rev = CC_HW_REV_630,
1817		.std_body = CC_STD_NIST,
1818	},
1819};
1820
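/*
 * Allocate a cc_hash_alg instance from the given template, filling in
 * either the plain hash names or, when @keyed is set, the HMAC/MAC names.
 * The allocation is devm-managed, so there is no matching free.
 */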
1821static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1822					     struct device *dev, bool keyed)
1823{
1824	struct cc_hash_alg *t_crypto_alg;
1825	struct crypto_alg *alg;
1826	struct ahash_alg *halg;
1827
1828	t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
1829	if (!t_crypto_alg)
1830		return ERR_PTR(-ENOMEM);
1831
1832	t_crypto_alg->ahash_alg = template->template_ahash;
1833	halg = &t_crypto_alg->ahash_alg;
1834	alg = &halg->halg.base;
1835
1836	if (keyed) {
1837		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1838			 template->mac_name);
1839		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1840			 template->mac_driver_name);
1841	} else {
1842		halg->setkey = NULL;
1843		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1844			 template->name);
1845		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1846			 template->driver_name);
1847	}
1848	alg->cra_module = THIS_MODULE;
1849	alg->cra_ctxsize = sizeof(struct cc_hash_ctx) + crypto_dma_padding();
1850	alg->cra_priority = CC_CRA_PRIO;
1851	alg->cra_blocksize = template->blocksize;
1852	alg->cra_alignmask = 0;
1853	alg->cra_exit = cc_cra_exit;
1854
1855	alg->cra_init = cc_cra_init;
1856	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
1857
1858	t_crypto_alg->hash_mode = template->hash_mode;
1859	t_crypto_alg->hw_mode = template->hw_mode;
1860	t_crypto_alg->inter_digestsize = template->inter_digestsize;
1861
1862	return t_crypto_alg;
1863}
1864
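/* Copy one larval digest / length constant into SRAM and advance the offset */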
1865static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
1866			     unsigned int size, u32 *sram_buff_ofs)
1867{
1868	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1869	unsigned int larval_seq_len = 0;
1870	int rc;
1871
1872	cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
1873			 larval_seq, &larval_seq_len);
1874	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1875	if (rc)
1876		return rc;
1877
1878	*sram_buff_ofs += size;
1879	return 0;
1880}
1881
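/*
 * Load the digest-length constants and the initial (larval) digests into
 * SRAM. The copy order below must match the offsets computed by
 * cc_larval_digest_addr() and cc_digest_len_addr().
 */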
1882int cc_init_hash_sram(struct cc_drvdata *drvdata)
1883{
1884	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1885	u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
1886	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1887	bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
1888	int rc = 0;
1889
1890	/* Copy-to-sram digest-len */
1891	rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
1892			       sizeof(cc_digest_len_init), &sram_buff_ofs);
1893	if (rc)
1894		goto init_digest_const_err;
1895
1896	if (large_sha_supported) {
1897		/* Copy-to-sram digest-len for sha384/512 */
1898		rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
1899				       sizeof(cc_digest_len_sha512_init),
1900				       &sram_buff_ofs);
1901		if (rc)
1902			goto init_digest_const_err;
1903	}
1904
1905	/* The initial digests offset */
1906	hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1907
1908	/* Copy-to-sram initial SHA* digests */
1909	rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
1910			       &sram_buff_ofs);
1911	if (rc)
1912		goto init_digest_const_err;
1913
1914	rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
1915			       &sram_buff_ofs);
1916	if (rc)
1917		goto init_digest_const_err;
1918
1919	rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
1920			       &sram_buff_ofs);
1921	if (rc)
1922		goto init_digest_const_err;
1923
1924	rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
1925			       &sram_buff_ofs);
1926	if (rc)
1927		goto init_digest_const_err;
1928
1929	if (sm3_supported) {
1930		rc = cc_init_copy_sram(drvdata, cc_sm3_init,
1931				       sizeof(cc_sm3_init), &sram_buff_ofs);
1932		if (rc)
1933			goto init_digest_const_err;
1934	}
1935
1936	if (large_sha_supported) {
1937		rc = cc_init_copy_sram(drvdata, cc_sha384_init,
1938				       sizeof(cc_sha384_init), &sram_buff_ofs);
1939		if (rc)
1940			goto init_digest_const_err;
1941
1942		rc = cc_init_copy_sram(drvdata, cc_sha512_init,
1943				       sizeof(cc_sha512_init), &sram_buff_ofs);
1944		if (rc)
1945			goto init_digest_const_err;
1946	}
1947
1948init_digest_const_err:
1949	return rc;
1950}
1951
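/*
 * Allocate the hash handle, reserve SRAM for the constants, program them
 * via cc_init_hash_sram() and register every ahash/HMAC algorithm the
 * hardware revision and configured standards body support.
 */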
1952int cc_hash_alloc(struct cc_drvdata *drvdata)
1953{
1954	struct cc_hash_handle *hash_handle;
1955	u32 sram_buff;
1956	u32 sram_size_to_alloc;
1957	struct device *dev = drvdata_to_dev(drvdata);
1958	int rc = 0;
1959	int alg;
1960
1961	hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
1962	if (!hash_handle)
1963		return -ENOMEM;
1964
1965	INIT_LIST_HEAD(&hash_handle->hash_list);
1966	drvdata->hash_handle = hash_handle;
1967
1968	sram_size_to_alloc = sizeof(cc_digest_len_init) +
1969			sizeof(cc_md5_init) +
1970			sizeof(cc_sha1_init) +
1971			sizeof(cc_sha224_init) +
1972			sizeof(cc_sha256_init);
1973
1974	if (drvdata->hw_rev >= CC_HW_REV_713)
1975		sram_size_to_alloc += sizeof(cc_sm3_init);
1976
1977	if (drvdata->hw_rev >= CC_HW_REV_712)
1978		sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
1979			sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
1980
1981	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
1982	if (sram_buff == NULL_SRAM_ADDR) {
1983		rc = -ENOMEM;
1984		goto fail;
1985	}
1986
1987	/* The initial digest-len offset */
1988	hash_handle->digest_len_sram_addr = sram_buff;
1989
1990	/* Must be set before the alg registration as it is used there */
1991	rc = cc_init_hash_sram(drvdata);
1992	if (rc) {
1993		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
1994		goto fail;
1995	}
1996
1997	/* ahash registration */
1998	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
1999		struct cc_hash_alg *t_alg;
2000		int hw_mode = driver_hash[alg].hw_mode;
2001
2002		/* Check that the HW revision and variants are suitable */
2003		if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
2004		    !(drvdata->std_bodies & driver_hash[alg].std_body))
2005			continue;
2006
2007		if (driver_hash[alg].is_mac) {
2008			/* register hmac version */
2009			t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
2010			if (IS_ERR(t_alg)) {
2011				rc = PTR_ERR(t_alg);
2012				dev_err(dev, "%s alg allocation failed\n",
2013					driver_hash[alg].driver_name);
2014				goto fail;
2015			}
2016			t_alg->drvdata = drvdata;
2017
2018			rc = crypto_register_ahash(&t_alg->ahash_alg);
2019			if (rc) {
2020				dev_err(dev, "%s alg registration failed\n",
2021					driver_hash[alg].driver_name);
2022				goto fail;
2023			}
2024
2025			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2026		}
2027		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2028		    hw_mode == DRV_CIPHER_CMAC)
2029			continue;
2030
2031		/* register hash version */
2032		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
2033		if (IS_ERR(t_alg)) {
2034			rc = PTR_ERR(t_alg);
2035			dev_err(dev, "%s alg allocation failed\n",
2036				driver_hash[alg].driver_name);
2037			goto fail;
2038		}
2039		t_alg->drvdata = drvdata;
2040
2041		rc = crypto_register_ahash(&t_alg->ahash_alg);
2042		if (rc) {
2043			dev_err(dev, "%s alg registration failed\n",
2044				driver_hash[alg].driver_name);
2045			goto fail;
2046		}
2047
2048		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2049	}
2050
2051	return 0;
2052
2053fail:
2054	cc_hash_free(drvdata);
2055	return rc;
2056}
2057
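/* Unregister all ahash algorithms; handle and alg memory is devm-managed */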
2058int cc_hash_free(struct cc_drvdata *drvdata)
2059{
2060	struct cc_hash_alg *t_hash_alg, *hash_n;
2061	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2062
2063	list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
2064				 entry) {
2065		crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2066		list_del(&t_hash_alg->entry);
2067	}
2068
2069	return 0;
2070}
2071
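/*
 * Build the descriptor sequence that loads the three derived XCBC keys
 * (K1-K3) and the current MAC state into the AES engine.
 */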
2072static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2073			  unsigned int *seq_size)
2074{
2075	unsigned int idx = *seq_size;
2076	struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
2077	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2078	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
2079
2080	/* Setup XCBC MAC K1 */
2081	hw_desc_init(&desc[idx]);
2082	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2083					    XCBC_MAC_K1_OFFSET),
2084		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2085	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2086	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
2087	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2088	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2089	set_flow_mode(&desc[idx], S_DIN_to_AES);
2090	idx++;
2091
2092	/* Setup XCBC MAC K2 */
2093	hw_desc_init(&desc[idx]);
2094	set_din_type(&desc[idx], DMA_DLLI,
2095		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2096		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2097	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2098	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2099	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2100	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2101	set_flow_mode(&desc[idx], S_DIN_to_AES);
2102	idx++;
2103
2104	/* Setup XCBC MAC K3 */
2105	hw_desc_init(&desc[idx]);
2106	set_din_type(&desc[idx], DMA_DLLI,
2107		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2108		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2109	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2110	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2111	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2112	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2113	set_flow_mode(&desc[idx], S_DIN_to_AES);
2114	idx++;
2115
2116	/* Loading MAC state */
2117	hw_desc_init(&desc[idx]);
2118	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2119		     CC_AES_BLOCK_SIZE, NS_BIT);
2120	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2121	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2122	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2123	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2124	set_flow_mode(&desc[idx], S_DIN_to_AES);
2125	idx++;
2126	*seq_size = idx;
2127}
2128
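/* Build the descriptor sequence that loads the CMAC key and the MAC state */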
2129static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2130			  unsigned int *seq_size)
2131{
2132	unsigned int idx = *seq_size;
2133	struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
2134	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2135	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
2136
2137	/* Setup CMAC Key */
2138	hw_desc_init(&desc[idx]);
2139	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2140		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2141		      ctx->key_params.keylen), NS_BIT);
2142	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2143	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2144	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2145	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2146	set_flow_mode(&desc[idx], S_DIN_to_AES);
2147	idx++;
2148
2149	/* Load MAC state */
2150	hw_desc_init(&desc[idx]);
2151	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2152		     CC_AES_BLOCK_SIZE, NS_BIT);
2153	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2154	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2155	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2156	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2157	set_flow_mode(&desc[idx], S_DIN_to_AES);
2158	idx++;
2159	*seq_size = idx;
2160}
2161
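/*
 * Queue the data-input descriptors for the request: a single DLLI
 * descriptor for contiguous data, or a BYPASS copy of the MLLI table to
 * SRAM followed by an MLLI descriptor; nothing is queued for a NULL buffer.
 */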
2162static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2163			struct cc_hash_ctx *ctx, unsigned int flow_mode,
2164			struct cc_hw_desc desc[], bool is_not_last_data,
2165			unsigned int *seq_size)
2166{
2167	unsigned int idx = *seq_size;
2168	struct device *dev = drvdata_to_dev(ctx->drvdata);
2169
2170	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2171		hw_desc_init(&desc[idx]);
2172		set_din_type(&desc[idx], DMA_DLLI,
2173			     sg_dma_address(areq_ctx->curr_sg),
2174			     areq_ctx->curr_sg->length, NS_BIT);
2175		set_flow_mode(&desc[idx], flow_mode);
2176		idx++;
2177	} else {
2178		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2179			dev_dbg(dev, " NULL mode\n");
2180			/* nothing to build */
2181			return;
2182		}
2183		/* bypass */
2184		hw_desc_init(&desc[idx]);
2185		set_din_type(&desc[idx], DMA_DLLI,
2186			     areq_ctx->mlli_params.mlli_dma_addr,
2187			     areq_ctx->mlli_params.mlli_len, NS_BIT);
2188		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2189			      areq_ctx->mlli_params.mlli_len);
2190		set_flow_mode(&desc[idx], BYPASS);
2191		idx++;
2192		/* process */
2193		hw_desc_init(&desc[idx]);
2194		set_din_type(&desc[idx], DMA_MLLI,
2195			     ctx->drvdata->mlli_sram_addr,
2196			     areq_ctx->mlli_nents, NS_BIT);
2197		set_flow_mode(&desc[idx], flow_mode);
2198		idx++;
2199	}
2200	if (is_not_last_data)
2201		set_din_not_last_indication(&desc[(idx - 1)]);
2202	/* return updated desc sequence size */
2203	*seq_size = idx;
2204}
2205
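/*
 * Return the initial (larval) digest constant for the given hash mode;
 * falls back to the MD5 constant on an invalid mode.
 */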
2206static const void *cc_larval_digest(struct device *dev, u32 mode)
2207{
2208	switch (mode) {
2209	case DRV_HASH_MD5:
2210		return cc_md5_init;
2211	case DRV_HASH_SHA1:
2212		return cc_sha1_init;
2213	case DRV_HASH_SHA224:
2214		return cc_sha224_init;
2215	case DRV_HASH_SHA256:
2216		return cc_sha256_init;
2217	case DRV_HASH_SHA384:
2218		return cc_sha384_init;
2219	case DRV_HASH_SHA512:
2220		return cc_sha512_init;
2221	case DRV_HASH_SM3:
2222		return cc_sm3_init;
2223	default:
2224		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2225		return cc_md5_init;
2226	}
2227}
2228
2229/**
2230 * cc_larval_digest_addr() - Get the address of the initial digest in SRAM
2231 * according to the given hash mode
2232 *
2233 * @drvdata: Associated device driver context
2234 * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
2235 *
2236 * Return:
2237 * The address of the initial digest in SRAM
2238 */
2239u32 cc_larval_digest_addr(void *drvdata, u32 mode)
2240{
2241	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2242	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2243	struct device *dev = drvdata_to_dev(_drvdata);
2244	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
2245	u32 addr;
2246
2247	switch (mode) {
2248	case DRV_HASH_NULL:
2249		break; /*Ignore*/
2250	case DRV_HASH_MD5:
2251		return (hash_handle->larval_digest_sram_addr);
2252	case DRV_HASH_SHA1:
2253		return (hash_handle->larval_digest_sram_addr +
2254			sizeof(cc_md5_init));
2255	case DRV_HASH_SHA224:
2256		return (hash_handle->larval_digest_sram_addr +
2257			sizeof(cc_md5_init) +
2258			sizeof(cc_sha1_init));
2259	case DRV_HASH_SHA256:
2260		return (hash_handle->larval_digest_sram_addr +
2261			sizeof(cc_md5_init) +
2262			sizeof(cc_sha1_init) +
2263			sizeof(cc_sha224_init));
2264	case DRV_HASH_SM3:
2265		return (hash_handle->larval_digest_sram_addr +
2266			sizeof(cc_md5_init) +
2267			sizeof(cc_sha1_init) +
2268			sizeof(cc_sha224_init) +
2269			sizeof(cc_sha256_init));
2270	case DRV_HASH_SHA384:
2271		addr = (hash_handle->larval_digest_sram_addr +
2272			sizeof(cc_md5_init) +
2273			sizeof(cc_sha1_init) +
2274			sizeof(cc_sha224_init) +
2275			sizeof(cc_sha256_init));
2276		if (sm3_supported)
2277			addr += sizeof(cc_sm3_init);
2278		return addr;
2279	case DRV_HASH_SHA512:
2280		addr = (hash_handle->larval_digest_sram_addr +
2281			sizeof(cc_md5_init) +
2282			sizeof(cc_sha1_init) +
2283			sizeof(cc_sha224_init) +
2284			sizeof(cc_sha256_init) +
2285			sizeof(cc_sha384_init));
2286		if (sm3_supported)
2287			addr += sizeof(cc_sm3_init);
2288		return addr;
2289	default:
2290		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2291	}
2292
2293	/* Deliberately return a valid, if incorrect, address to avoid a kernel crash */
2294	return hash_handle->larval_digest_sram_addr;
2295}
2296
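/**
 * cc_digest_len_addr() - Get the address of the initial digest length
 * constant in SRAM according to the given hash mode
 *
 * @drvdata: Associated device driver context
 * @mode: The Hash mode
 *
 * Return:
 * The SRAM address of the digest length constant (the SHA384/SHA512
 * variant for those modes)
 */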
2297u32 cc_digest_len_addr(void *drvdata, u32 mode)
2298{
2299	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2300	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2301	u32 digest_len_addr = hash_handle->digest_len_sram_addr;
2302
2303	switch (mode) {
2304	case DRV_HASH_SHA1:
2305	case DRV_HASH_SHA224:
2306	case DRV_HASH_SHA256:
2307	case DRV_HASH_MD5:
2308		return digest_len_addr;
2309	case DRV_HASH_SHA384:
2310	case DRV_HASH_SHA512:
2311		return digest_len_addr + sizeof(cc_digest_len_init);
2312	default:
2313		return digest_len_addr; /*to avoid kernel crash*/
2314	}
2315}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <crypto/algapi.h>
   7#include <crypto/hash.h>
   8#include <crypto/md5.h>
   9#include <crypto/sm3.h>
  10#include <crypto/internal/hash.h>
  11
  12#include "cc_driver.h"
  13#include "cc_request_mgr.h"
  14#include "cc_buffer_mgr.h"
  15#include "cc_hash.h"
  16#include "cc_sram_mgr.h"
  17
  18#define CC_MAX_HASH_SEQ_LEN 12
  19#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
  20#define CC_SM3_HASH_LEN_SIZE 8
  21
  22struct cc_hash_handle {
  23	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
  24	cc_sram_addr_t larval_digest_sram_addr;   /* const value in SRAM */
  25	struct list_head hash_list;
  26};
  27
  28static const u32 cc_digest_len_init[] = {
  29	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
  30static const u32 cc_md5_init[] = {
  31	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
  32static const u32 cc_sha1_init[] = {
  33	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
  34static const u32 cc_sha224_init[] = {
  35	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
  36	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
  37static const u32 cc_sha256_init[] = {
  38	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
  39	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
  40static const u32 cc_digest_len_sha512_init[] = {
  41	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
  42static u64 cc_sha384_init[] = {
  43	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
  44	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
  45static u64 cc_sha512_init[] = {
  46	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
  47	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
 
 
 
 
 
 
 
  48static const u32 cc_sm3_init[] = {
  49	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
  50	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
  51
  52static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
  53			  unsigned int *seq_size);
  54
  55static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
  56			  unsigned int *seq_size);
  57
  58static const void *cc_larval_digest(struct device *dev, u32 mode);
  59
  60struct cc_hash_alg {
  61	struct list_head entry;
  62	int hash_mode;
  63	int hw_mode;
  64	int inter_digestsize;
  65	struct cc_drvdata *drvdata;
  66	struct ahash_alg ahash_alg;
  67};
  68
  69struct hash_key_req_ctx {
  70	u32 keylen;
  71	dma_addr_t key_dma_addr;
  72	u8 *key;
  73};
  74
  75/* hash per-session context */
  76struct cc_hash_ctx {
  77	struct cc_drvdata *drvdata;
  78	/* holds the origin digest; the digest after "setkey" if HMAC,*
  79	 * the initial digest if HASH.
  80	 */
  81	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
  82	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE]  ____cacheline_aligned;
  83
  84	dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
  85	dma_addr_t digest_buff_dma_addr;
  86	/* use for hmac with key large then mode block size */
  87	struct hash_key_req_ctx key_params;
  88	int hash_mode;
  89	int hw_mode;
  90	int inter_digestsize;
  91	unsigned int hash_len;
  92	struct completion setkey_comp;
  93	bool is_hmac;
  94};
  95
  96static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
  97			unsigned int flow_mode, struct cc_hw_desc desc[],
  98			bool is_not_last_data, unsigned int *seq_size);
  99
 100static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
 101{
 102	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
 103	    mode == DRV_HASH_SHA512) {
 104		set_bytes_swap(desc, 1);
 105	} else {
 106		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 107	}
 108}
 109
 110static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
 111			 unsigned int digestsize)
 112{
 113	state->digest_result_dma_addr =
 114		dma_map_single(dev, state->digest_result_buff,
 115			       digestsize, DMA_BIDIRECTIONAL);
 116	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
 117		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
 118			digestsize);
 119		return -ENOMEM;
 120	}
 121	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
 122		digestsize, state->digest_result_buff,
 123		&state->digest_result_dma_addr);
 124
 125	return 0;
 126}
 127
 128static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
 129			struct cc_hash_ctx *ctx)
 130{
 131	bool is_hmac = ctx->is_hmac;
 132
 133	memset(state, 0, sizeof(*state));
 134
 135	if (is_hmac) {
 136		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
 137		    ctx->hw_mode != DRV_CIPHER_CMAC) {
 138			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
 139						ctx->inter_digestsize,
 140						DMA_BIDIRECTIONAL);
 141
 142			memcpy(state->digest_buff, ctx->digest_buff,
 143			       ctx->inter_digestsize);
 144			if (ctx->hash_mode == DRV_HASH_SHA512 ||
 145			    ctx->hash_mode == DRV_HASH_SHA384)
 146				memcpy(state->digest_bytes_len,
 147				       cc_digest_len_sha512_init,
 148				       ctx->hash_len);
 149			else
 150				memcpy(state->digest_bytes_len,
 151				       cc_digest_len_init,
 152				       ctx->hash_len);
 153		}
 154
 155		if (ctx->hash_mode != DRV_HASH_NULL) {
 156			dma_sync_single_for_cpu(dev,
 157						ctx->opad_tmp_keys_dma_addr,
 158						ctx->inter_digestsize,
 159						DMA_BIDIRECTIONAL);
 160			memcpy(state->opad_digest_buff,
 161			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
 162		}
 163	} else { /*hash*/
 164		/* Copy the initial digests if hash flow. */
 165		const void *larval = cc_larval_digest(dev, ctx->hash_mode);
 166
 167		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
 168	}
 169}
 170
 171static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
 172		      struct cc_hash_ctx *ctx)
 173{
 174	bool is_hmac = ctx->is_hmac;
 175
 176	state->digest_buff_dma_addr =
 177		dma_map_single(dev, state->digest_buff,
 178			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 179	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
 180		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
 181			ctx->inter_digestsize, state->digest_buff);
 182		return -EINVAL;
 183	}
 184	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
 185		ctx->inter_digestsize, state->digest_buff,
 186		&state->digest_buff_dma_addr);
 187
 188	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
 189		state->digest_bytes_len_dma_addr =
 190			dma_map_single(dev, state->digest_bytes_len,
 191				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 192		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
 193			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
 194				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
 195			goto unmap_digest_buf;
 196		}
 197		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
 198			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
 199			&state->digest_bytes_len_dma_addr);
 200	}
 201
 202	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
 203		state->opad_digest_dma_addr =
 204			dma_map_single(dev, state->opad_digest_buff,
 205				       ctx->inter_digestsize,
 206				       DMA_BIDIRECTIONAL);
 207		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
 208			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
 209				ctx->inter_digestsize,
 210				state->opad_digest_buff);
 211			goto unmap_digest_len;
 212		}
 213		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
 214			ctx->inter_digestsize, state->opad_digest_buff,
 215			&state->opad_digest_dma_addr);
 216	}
 217
 218	return 0;
 219
 220unmap_digest_len:
 221	if (state->digest_bytes_len_dma_addr) {
 222		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
 223				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 224		state->digest_bytes_len_dma_addr = 0;
 225	}
 226unmap_digest_buf:
 227	if (state->digest_buff_dma_addr) {
 228		dma_unmap_single(dev, state->digest_buff_dma_addr,
 229				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 230		state->digest_buff_dma_addr = 0;
 231	}
 232
 233	return -EINVAL;
 234}
 235
 236static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
 237			 struct cc_hash_ctx *ctx)
 238{
 239	if (state->digest_buff_dma_addr) {
 240		dma_unmap_single(dev, state->digest_buff_dma_addr,
 241				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 242		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
 243			&state->digest_buff_dma_addr);
 244		state->digest_buff_dma_addr = 0;
 245	}
 246	if (state->digest_bytes_len_dma_addr) {
 247		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
 248				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 249		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
 250			&state->digest_bytes_len_dma_addr);
 251		state->digest_bytes_len_dma_addr = 0;
 252	}
 253	if (state->opad_digest_dma_addr) {
 254		dma_unmap_single(dev, state->opad_digest_dma_addr,
 255				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 256		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
 257			&state->opad_digest_dma_addr);
 258		state->opad_digest_dma_addr = 0;
 259	}
 260}
 261
 262static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
 263			    unsigned int digestsize, u8 *result)
 264{
 265	if (state->digest_result_dma_addr) {
 266		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
 267				 DMA_BIDIRECTIONAL);
 268		dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
 269			state->digest_result_buff,
 270			&state->digest_result_dma_addr, digestsize);
 271		memcpy(result, state->digest_result_buff, digestsize);
 272	}
 273	state->digest_result_dma_addr = 0;
 274}
 275
 276static void cc_update_complete(struct device *dev, void *cc_req, int err)
 277{
 278	struct ahash_request *req = (struct ahash_request *)cc_req;
 279	struct ahash_req_ctx *state = ahash_request_ctx(req);
 280	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 281	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 282
 283	dev_dbg(dev, "req=%pK\n", req);
 284
 285	if (err != -EINPROGRESS) {
 286		/* Not a BACKLOG notification */
 287		cc_unmap_hash_request(dev, state, req->src, false);
 288		cc_unmap_req(dev, state, ctx);
 289	}
 290
 291	ahash_request_complete(req, err);
 292}
 293
 294static void cc_digest_complete(struct device *dev, void *cc_req, int err)
 295{
 296	struct ahash_request *req = (struct ahash_request *)cc_req;
 297	struct ahash_req_ctx *state = ahash_request_ctx(req);
 298	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 299	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 300	u32 digestsize = crypto_ahash_digestsize(tfm);
 301
 302	dev_dbg(dev, "req=%pK\n", req);
 303
 304	if (err != -EINPROGRESS) {
 305		/* Not a BACKLOG notification */
 306		cc_unmap_hash_request(dev, state, req->src, false);
 307		cc_unmap_result(dev, state, digestsize, req->result);
 308		cc_unmap_req(dev, state, ctx);
 309	}
 310
 311	ahash_request_complete(req, err);
 312}
 313
 314static void cc_hash_complete(struct device *dev, void *cc_req, int err)
 315{
 316	struct ahash_request *req = (struct ahash_request *)cc_req;
 317	struct ahash_req_ctx *state = ahash_request_ctx(req);
 318	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 319	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 320	u32 digestsize = crypto_ahash_digestsize(tfm);
 321
 322	dev_dbg(dev, "req=%pK\n", req);
 323
 324	if (err != -EINPROGRESS) {
 325		/* Not a BACKLOG notification */
 326		cc_unmap_hash_request(dev, state, req->src, false);
 327		cc_unmap_result(dev, state, digestsize, req->result);
 328		cc_unmap_req(dev, state, ctx);
 329	}
 330
 331	ahash_request_complete(req, err);
 332}
 333
 334static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
 335			 int idx)
 336{
 337	struct ahash_req_ctx *state = ahash_request_ctx(req);
 338	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 339	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 340	u32 digestsize = crypto_ahash_digestsize(tfm);
 341
 342	/* Get final MAC result */
 343	hw_desc_init(&desc[idx]);
 344	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 345	/* TODO */
 346	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
 347		      NS_BIT, 1);
 348	set_queue_last_ind(ctx->drvdata, &desc[idx]);
 349	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 350	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 351	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 352	cc_set_endianity(ctx->hash_mode, &desc[idx]);
 353	idx++;
 354
 355	return idx;
 356}
 357
 358static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
 359		       int idx)
 360{
 361	struct ahash_req_ctx *state = ahash_request_ctx(req);
 362	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 363	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 364	u32 digestsize = crypto_ahash_digestsize(tfm);
 365
 366	/* store the hash digest result in the context */
 367	hw_desc_init(&desc[idx]);
 368	set_cipher_mode(&desc[idx], ctx->hw_mode);
 369	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
 370		      NS_BIT, 0);
 371	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 372	cc_set_endianity(ctx->hash_mode, &desc[idx]);
 373	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 374	idx++;
 375
 376	/* Loading hash opad xor key state */
 377	hw_desc_init(&desc[idx]);
 378	set_cipher_mode(&desc[idx], ctx->hw_mode);
 379	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
 380		     ctx->inter_digestsize, NS_BIT);
 381	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 382	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 383	idx++;
 384
 385	/* Load the hash current length */
 386	hw_desc_init(&desc[idx]);
 387	set_cipher_mode(&desc[idx], ctx->hw_mode);
 388	set_din_sram(&desc[idx],
 389		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
 390		     ctx->hash_len);
 391	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 392	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 393	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 394	idx++;
 395
 396	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
 397	hw_desc_init(&desc[idx]);
 398	set_din_no_dma(&desc[idx], 0, 0xfffff0);
 399	set_dout_no_dma(&desc[idx], 0, 0, 1);
 400	idx++;
 401
 402	/* Perform HASH update */
 403	hw_desc_init(&desc[idx]);
 404	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 405		     digestsize, NS_BIT);
 406	set_flow_mode(&desc[idx], DIN_HASH);
 407	idx++;
 408
 409	return idx;
 410}
 411
 412static int cc_hash_digest(struct ahash_request *req)
 413{
 414	struct ahash_req_ctx *state = ahash_request_ctx(req);
 415	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 416	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 417	u32 digestsize = crypto_ahash_digestsize(tfm);
 418	struct scatterlist *src = req->src;
 419	unsigned int nbytes = req->nbytes;
 420	u8 *result = req->result;
 421	struct device *dev = drvdata_to_dev(ctx->drvdata);
 422	bool is_hmac = ctx->is_hmac;
 423	struct cc_crypto_req cc_req = {};
 424	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 425	cc_sram_addr_t larval_digest_addr =
 426		cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
 427	int idx = 0;
 428	int rc = 0;
 429	gfp_t flags = cc_gfp_flags(&req->base);
 430
 431	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
 432		nbytes);
 433
 434	cc_init_req(dev, state, ctx);
 435
 436	if (cc_map_req(dev, state, ctx)) {
 437		dev_err(dev, "map_ahash_source() failed\n");
 438		return -ENOMEM;
 439	}
 440
 441	if (cc_map_result(dev, state, digestsize)) {
 442		dev_err(dev, "map_ahash_digest() failed\n");
 443		cc_unmap_req(dev, state, ctx);
 444		return -ENOMEM;
 445	}
 446
 447	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
 448				      flags)) {
 449		dev_err(dev, "map_ahash_request_final() failed\n");
 450		cc_unmap_result(dev, state, digestsize, result);
 451		cc_unmap_req(dev, state, ctx);
 452		return -ENOMEM;
 453	}
 454
 455	/* Setup request structure */
 456	cc_req.user_cb = cc_digest_complete;
 457	cc_req.user_arg = req;
 458
 459	/* If HMAC then load hash IPAD xor key, if HASH then load initial
 460	 * digest
 461	 */
 462	hw_desc_init(&desc[idx]);
 463	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 464	if (is_hmac) {
 465		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 466			     ctx->inter_digestsize, NS_BIT);
 467	} else {
 
 
 468		set_din_sram(&desc[idx], larval_digest_addr,
 469			     ctx->inter_digestsize);
 470	}
 471	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 472	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 473	idx++;
 474
 475	/* Load the hash current length */
 476	hw_desc_init(&desc[idx]);
 477	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 478
 479	if (is_hmac) {
 480		set_din_type(&desc[idx], DMA_DLLI,
 481			     state->digest_bytes_len_dma_addr,
 482			     ctx->hash_len, NS_BIT);
 483	} else {
 484		set_din_const(&desc[idx], 0, ctx->hash_len);
 485		if (nbytes)
 486			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 487		else
 488			set_cipher_do(&desc[idx], DO_PAD);
 489	}
 490	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 491	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 492	idx++;
 493
 494	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
 495
 496	if (is_hmac) {
 497		/* HW last hash block padding (aka. "DO_PAD") */
 498		hw_desc_init(&desc[idx]);
 499		set_cipher_mode(&desc[idx], ctx->hw_mode);
 500		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
 501			      ctx->hash_len, NS_BIT, 0);
 502		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 503		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 504		set_cipher_do(&desc[idx], DO_PAD);
 505		idx++;
 506
 507		idx = cc_fin_hmac(desc, req, idx);
 508	}
 509
 510	idx = cc_fin_result(desc, req, idx);
 511
 512	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 513	if (rc != -EINPROGRESS && rc != -EBUSY) {
 514		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 515		cc_unmap_hash_request(dev, state, src, true);
 516		cc_unmap_result(dev, state, digestsize, result);
 517		cc_unmap_req(dev, state, ctx);
 518	}
 519	return rc;
 520}
 521
 522static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
 523			   struct ahash_req_ctx *state, unsigned int idx)
 524{
 525	/* Restore hash digest */
 526	hw_desc_init(&desc[idx]);
 527	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 528	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 529		     ctx->inter_digestsize, NS_BIT);
 530	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 531	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 532	idx++;
 533
 534	/* Restore hash current length */
 535	hw_desc_init(&desc[idx]);
 536	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 537	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 538	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
 539		     ctx->hash_len, NS_BIT);
 540	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 541	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 542	idx++;
 543
 544	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
 545
 546	return idx;
 547}
 548
 549static int cc_hash_update(struct ahash_request *req)
 550{
 551	struct ahash_req_ctx *state = ahash_request_ctx(req);
 552	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 553	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 554	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
 555	struct scatterlist *src = req->src;
 556	unsigned int nbytes = req->nbytes;
 557	struct device *dev = drvdata_to_dev(ctx->drvdata);
 558	struct cc_crypto_req cc_req = {};
 559	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 560	u32 idx = 0;
 561	int rc;
 562	gfp_t flags = cc_gfp_flags(&req->base);
 563
 564	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
 565		"hmac" : "hash", nbytes);
 566
 567	if (nbytes == 0) {
 568		/* no real updates required */
 569		return 0;
 570	}
 571
 572	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
 573					block_size, flags);
 574	if (rc) {
 575		if (rc == 1) {
 576			dev_dbg(dev, " data size not require HW update %x\n",
 577				nbytes);
 578			/* No hardware updates are required */
 579			return 0;
 580		}
 581		dev_err(dev, "map_ahash_request_update() failed\n");
 582		return -ENOMEM;
 583	}
 584
 585	if (cc_map_req(dev, state, ctx)) {
 586		dev_err(dev, "map_ahash_source() failed\n");
 587		cc_unmap_hash_request(dev, state, src, true);
 588		return -EINVAL;
 589	}
 590
 591	/* Setup request structure */
 592	cc_req.user_cb = cc_update_complete;
 593	cc_req.user_arg = req;
 594
 595	idx = cc_restore_hash(desc, ctx, state, idx);
 596
 597	/* store the hash digest result in context */
 598	hw_desc_init(&desc[idx]);
 599	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 600	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
 601		      ctx->inter_digestsize, NS_BIT, 0);
 602	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 603	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 604	idx++;
 605
 606	/* store current hash length in context */
 607	hw_desc_init(&desc[idx]);
 608	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 609	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
 610		      ctx->hash_len, NS_BIT, 1);
 611	set_queue_last_ind(ctx->drvdata, &desc[idx]);
 612	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 613	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 614	idx++;
 615
 616	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 617	if (rc != -EINPROGRESS && rc != -EBUSY) {
 618		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 619		cc_unmap_hash_request(dev, state, src, true);
 620		cc_unmap_req(dev, state, ctx);
 621	}
 622	return rc;
 623}
 624
 625static int cc_do_finup(struct ahash_request *req, bool update)
 626{
 627	struct ahash_req_ctx *state = ahash_request_ctx(req);
 628	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 629	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 630	u32 digestsize = crypto_ahash_digestsize(tfm);
 631	struct scatterlist *src = req->src;
 632	unsigned int nbytes = req->nbytes;
 633	u8 *result = req->result;
 634	struct device *dev = drvdata_to_dev(ctx->drvdata);
 635	bool is_hmac = ctx->is_hmac;
 636	struct cc_crypto_req cc_req = {};
 637	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 638	unsigned int idx = 0;
 639	int rc;
 640	gfp_t flags = cc_gfp_flags(&req->base);
 641
 642	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
 643		update ? "finup" : "final", nbytes);
 644
 645	if (cc_map_req(dev, state, ctx)) {
 646		dev_err(dev, "map_ahash_source() failed\n");
 647		return -EINVAL;
 648	}
 649
 650	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
 651				      flags)) {
 652		dev_err(dev, "map_ahash_request_final() failed\n");
 653		cc_unmap_req(dev, state, ctx);
 654		return -ENOMEM;
 655	}
 656	if (cc_map_result(dev, state, digestsize)) {
 657		dev_err(dev, "map_ahash_digest() failed\n");
 658		cc_unmap_hash_request(dev, state, src, true);
 659		cc_unmap_req(dev, state, ctx);
 660		return -ENOMEM;
 661	}
 662
 663	/* Setup request structure */
 664	cc_req.user_cb = cc_hash_complete;
 665	cc_req.user_arg = req;
 666
 667	idx = cc_restore_hash(desc, ctx, state, idx);
 668
 669	/* Pad the hash */
 670	hw_desc_init(&desc[idx]);
 671	set_cipher_do(&desc[idx], DO_PAD);
 672	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 673	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
 674		      ctx->hash_len, NS_BIT, 0);
 675	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 676	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 677	idx++;
 678
 679	if (is_hmac)
 680		idx = cc_fin_hmac(desc, req, idx);
 681
 682	idx = cc_fin_result(desc, req, idx);
 683
 684	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 685	if (rc != -EINPROGRESS && rc != -EBUSY) {
 686		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 687		cc_unmap_hash_request(dev, state, src, true);
 688		cc_unmap_result(dev, state, digestsize, result);
 689		cc_unmap_req(dev, state, ctx);
 690	}
 691	return rc;
 692}
 693
 694static int cc_hash_finup(struct ahash_request *req)
 695{
 696	return cc_do_finup(req, true);
 697}
 698
 699
 700static int cc_hash_final(struct ahash_request *req)
 701{
 702	return cc_do_finup(req, false);
 703}
 704
 705static int cc_hash_init(struct ahash_request *req)
 706{
 707	struct ahash_req_ctx *state = ahash_request_ctx(req);
 708	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 709	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 710	struct device *dev = drvdata_to_dev(ctx->drvdata);
 711
 712	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
 713
 714	cc_init_req(dev, state, ctx);
 715
 716	return 0;
 717}
 718
 719static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
 720			  unsigned int keylen)
 721{
 722	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 723	struct cc_crypto_req cc_req = {};
 724	struct cc_hash_ctx *ctx = NULL;
 725	int blocksize = 0;
 726	int digestsize = 0;
 727	int i, idx = 0, rc = 0;
 728	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 729	cc_sram_addr_t larval_addr;
 730	struct device *dev;
 731
 732	ctx = crypto_ahash_ctx(ahash);
 733	dev = drvdata_to_dev(ctx->drvdata);
 734	dev_dbg(dev, "start keylen: %d", keylen);
 735
 736	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
 737	digestsize = crypto_ahash_digestsize(ahash);
 738
 739	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
 740
 741	/* The keylen value distinguishes HASH in case keylen is ZERO bytes,
 742	 * any NON-ZERO value utilizes HMAC flow
 743	 */
 744	ctx->key_params.keylen = keylen;
 745	ctx->key_params.key_dma_addr = 0;
 746	ctx->is_hmac = true;
 747	ctx->key_params.key = NULL;
 748
 749	if (keylen) {
 750		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
 751		if (!ctx->key_params.key)
 752			return -ENOMEM;
 753
 754		ctx->key_params.key_dma_addr =
 755			dma_map_single(dev, (void *)ctx->key_params.key, keylen,
 756				       DMA_TO_DEVICE);
 757		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 758			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 759				ctx->key_params.key, keylen);
 760			kzfree(ctx->key_params.key);
 761			return -ENOMEM;
 762		}
 763		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
 764			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 765
 766		if (keylen > blocksize) {
 767			/* Load hash initial state */
 768			hw_desc_init(&desc[idx]);
 769			set_cipher_mode(&desc[idx], ctx->hw_mode);
 770			set_din_sram(&desc[idx], larval_addr,
 771				     ctx->inter_digestsize);
 772			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 773			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 774			idx++;
 775
 776			/* Load the hash current length*/
 777			hw_desc_init(&desc[idx]);
 778			set_cipher_mode(&desc[idx], ctx->hw_mode);
 779			set_din_const(&desc[idx], 0, ctx->hash_len);
 780			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 781			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 782			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 783			idx++;
 784
 785			hw_desc_init(&desc[idx]);
 786			set_din_type(&desc[idx], DMA_DLLI,
 787				     ctx->key_params.key_dma_addr, keylen,
 788				     NS_BIT);
 789			set_flow_mode(&desc[idx], DIN_HASH);
 790			idx++;
 791
 792			/* Get hashed key */
 793			hw_desc_init(&desc[idx]);
 794			set_cipher_mode(&desc[idx], ctx->hw_mode);
 795			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 796				      digestsize, NS_BIT, 0);
 797			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 798			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 799			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 800			cc_set_endianity(ctx->hash_mode, &desc[idx]);
 801			idx++;
 802
 803			hw_desc_init(&desc[idx]);
 804			set_din_const(&desc[idx], 0, (blocksize - digestsize));
 805			set_flow_mode(&desc[idx], BYPASS);
 806			set_dout_dlli(&desc[idx],
 807				      (ctx->opad_tmp_keys_dma_addr +
 808				       digestsize),
 809				      (blocksize - digestsize), NS_BIT, 0);
 810			idx++;
 811		} else {
 812			hw_desc_init(&desc[idx]);
 813			set_din_type(&desc[idx], DMA_DLLI,
 814				     ctx->key_params.key_dma_addr, keylen,
 815				     NS_BIT);
 816			set_flow_mode(&desc[idx], BYPASS);
 817			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 818				      keylen, NS_BIT, 0);
 819			idx++;
 820
 821			if ((blocksize - keylen)) {
 822				hw_desc_init(&desc[idx]);
 823				set_din_const(&desc[idx], 0,
 824					      (blocksize - keylen));
 825				set_flow_mode(&desc[idx], BYPASS);
 826				set_dout_dlli(&desc[idx],
 827					      (ctx->opad_tmp_keys_dma_addr +
 828					       keylen), (blocksize - keylen),
 829					      NS_BIT, 0);
 830				idx++;
 831			}
 832		}
 833	} else {
 834		hw_desc_init(&desc[idx]);
 835		set_din_const(&desc[idx], 0, blocksize);
 836		set_flow_mode(&desc[idx], BYPASS);
 837		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
 838			      blocksize, NS_BIT, 0);
 839		idx++;
 840	}
 841
 842	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 843	if (rc) {
 844		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 845		goto out;
 846	}
 847
 848	/* calc derived HMAC key */
 849	for (idx = 0, i = 0; i < 2; i++) {
 850		/* Load hash initial state */
 851		hw_desc_init(&desc[idx]);
 852		set_cipher_mode(&desc[idx], ctx->hw_mode);
 853		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
 854		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 855		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 856		idx++;
 857
 858		/* Load the hash current length*/
 859		hw_desc_init(&desc[idx]);
 860		set_cipher_mode(&desc[idx], ctx->hw_mode);
 861		set_din_const(&desc[idx], 0, ctx->hash_len);
 862		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 863		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 864		idx++;
 865
 866		/* Prepare ipad key */
 867		hw_desc_init(&desc[idx]);
 868		set_xor_val(&desc[idx], hmac_pad_const[i]);
 869		set_cipher_mode(&desc[idx], ctx->hw_mode);
 870		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 871		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 872		idx++;
 873
 874		/* Perform HASH update */
 875		hw_desc_init(&desc[idx]);
 876		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
 877			     blocksize, NS_BIT);
 878		set_cipher_mode(&desc[idx], ctx->hw_mode);
 879		set_xor_active(&desc[idx]);
 880		set_flow_mode(&desc[idx], DIN_HASH);
 881		idx++;
 882
 883		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
 884		 * of the first HASH "update" state)
 885		 */
 886		hw_desc_init(&desc[idx]);
 887		set_cipher_mode(&desc[idx], ctx->hw_mode);
 888		if (i > 0) /* Not first iteration */
 889			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 890				      ctx->inter_digestsize, NS_BIT, 0);
 891		else /* First iteration */
 892			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
 893				      ctx->inter_digestsize, NS_BIT, 0);
 894		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 895		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 896		idx++;
 897	}
 898
 899	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 900
 901out:
 902	if (rc)
 903		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 904
 905	if (ctx->key_params.key_dma_addr) {
 906		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
 907				 ctx->key_params.keylen, DMA_TO_DEVICE);
 908		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
 909			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 910	}
 911
 912	kzfree(ctx->key_params.key);
 913
 914	return rc;
 915}
 916
 917static int cc_xcbc_setkey(struct crypto_ahash *ahash,
 918			  const u8 *key, unsigned int keylen)
 919{
 920	struct cc_crypto_req cc_req = {};
 921	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 922	struct device *dev = drvdata_to_dev(ctx->drvdata);
 923	int rc = 0;
 924	unsigned int idx = 0;
 925	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 926
 927	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
 928
 929	switch (keylen) {
 930	case AES_KEYSIZE_128:
 931	case AES_KEYSIZE_192:
 932	case AES_KEYSIZE_256:
 933		break;
 934	default:
 935		return -EINVAL;
 936	}
 937
 938	ctx->key_params.keylen = keylen;
 939
 940	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
 941	if (!ctx->key_params.key)
 942		return -ENOMEM;
 943
 944	ctx->key_params.key_dma_addr =
 945		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
 946	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 947		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 948			key, keylen);
 949		kzfree(ctx->key_params.key);
 950		return -ENOMEM;
 951	}
 952	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
 953		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 954
 955	ctx->is_hmac = true;
 956	/* 1. Load the AES key */
 957	hw_desc_init(&desc[idx]);
 958	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
 959		     keylen, NS_BIT);
 960	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
 961	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
 962	set_key_size_aes(&desc[idx], keylen);
 963	set_flow_mode(&desc[idx], S_DIN_to_AES);
 964	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 965	idx++;
 966
 967	hw_desc_init(&desc[idx]);
 968	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
 969	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 970	set_dout_dlli(&desc[idx],
 971		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
 972		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 973	idx++;
 974
 975	hw_desc_init(&desc[idx]);
 976	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
 977	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 978	set_dout_dlli(&desc[idx],
 979		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
 980		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 981	idx++;
 982
 983	hw_desc_init(&desc[idx]);
 984	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
 985	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 986	set_dout_dlli(&desc[idx],
 987		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
 988		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 989	idx++;
 990
 991	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 992
 993	if (rc)
 994		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 995
 996	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
 997			 ctx->key_params.keylen, DMA_TO_DEVICE);
 998	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
 999		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1000
1001	kzfree(ctx->key_params.key);
1002
1003	return rc;
1004}
1005
1006static int cc_cmac_setkey(struct crypto_ahash *ahash,
1007			  const u8 *key, unsigned int keylen)
1008{
1009	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1010	struct device *dev = drvdata_to_dev(ctx->drvdata);
1011
1012	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1013
1014	ctx->is_hmac = true;
1015
1016	switch (keylen) {
1017	case AES_KEYSIZE_128:
1018	case AES_KEYSIZE_192:
1019	case AES_KEYSIZE_256:
1020		break;
1021	default:
1022		return -EINVAL;
1023	}
1024
1025	ctx->key_params.keylen = keylen;
1026
1027	/* STAT_PHASE_1: Copy key to ctx */
1028
1029	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1030				keylen, DMA_TO_DEVICE);
1031
1032	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1033	if (keylen == 24) {
1034		memset(ctx->opad_tmp_keys_buff + 24, 0,
1035		       CC_AES_KEY_SIZE_MAX - 24);
1036	}
1037
1038	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1039				   keylen, DMA_TO_DEVICE);
1040
1041	ctx->key_params.keylen = keylen;
1042
1043	return 0;
1044}
1045
1046static void cc_free_ctx(struct cc_hash_ctx *ctx)
1047{
1048	struct device *dev = drvdata_to_dev(ctx->drvdata);
1049
1050	if (ctx->digest_buff_dma_addr) {
1051		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1052				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1053		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1054			&ctx->digest_buff_dma_addr);
1055		ctx->digest_buff_dma_addr = 0;
1056	}
1057	if (ctx->opad_tmp_keys_dma_addr) {
1058		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1059				 sizeof(ctx->opad_tmp_keys_buff),
1060				 DMA_BIDIRECTIONAL);
1061		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1062			&ctx->opad_tmp_keys_dma_addr);
1063		ctx->opad_tmp_keys_dma_addr = 0;
1064	}
1065
1066	ctx->key_params.keylen = 0;
1067}
1068
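/*
 * Descriptive note (added): cc_alloc_ctx() maps the per-tfm digest_buff and
 * opad_tmp_keys_buff for bidirectional DMA. On any mapping failure the
 * context is torn down via cc_free_ctx() and -ENOMEM is returned.
 */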
1069static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
1070{
1071	struct device *dev = drvdata_to_dev(ctx->drvdata);
1072
1073	ctx->key_params.keylen = 0;
1074
1075	ctx->digest_buff_dma_addr =
1076		dma_map_single(dev, (void *)ctx->digest_buff,
1077			       sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1078	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1079		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1080			sizeof(ctx->digest_buff), ctx->digest_buff);
1081		goto fail;
1082	}
1083	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1084		sizeof(ctx->digest_buff), ctx->digest_buff,
1085		&ctx->digest_buff_dma_addr);
1086
1087	ctx->opad_tmp_keys_dma_addr =
1088		dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
1089			       sizeof(ctx->opad_tmp_keys_buff),
1090			       DMA_BIDIRECTIONAL);
1091	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1092		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1093			sizeof(ctx->opad_tmp_keys_buff),
1094			ctx->opad_tmp_keys_buff);
1095		goto fail;
1096	}
1097	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1098		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1099		&ctx->opad_tmp_keys_dma_addr);
1100
1101	ctx->is_hmac = false;
1102	return 0;
1103
1104fail:
1105	cc_free_ctx(ctx);
1106	return -ENOMEM;
1107}
1108
1109static int cc_get_hash_len(struct crypto_tfm *tfm)
1110{
1111	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1112
1113	if (ctx->hash_mode == DRV_HASH_SM3)
1114		return CC_SM3_HASH_LEN_SIZE;
1115	else
1116		return cc_get_default_hash_len(ctx->drvdata);
1117}
1118
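/*
 * Descriptive note (added): cc_cra_init() is the transform init callback.
 * It copies the per-algorithm parameters from the enclosing cc_hash_alg
 * into the tfm context, sets the ahash request size and maps the context
 * DMA buffers.
 */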
1119static int cc_cra_init(struct crypto_tfm *tfm)
1120{
1121	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1122	struct hash_alg_common *hash_alg_common =
1123		container_of(tfm->__crt_alg, struct hash_alg_common, base);
1124	struct ahash_alg *ahash_alg =
1125		container_of(hash_alg_common, struct ahash_alg, halg);
1126	struct cc_hash_alg *cc_alg =
1127			container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
1128
1129	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1130				 sizeof(struct ahash_req_ctx));
1131
1132	ctx->hash_mode = cc_alg->hash_mode;
1133	ctx->hw_mode = cc_alg->hw_mode;
1134	ctx->inter_digestsize = cc_alg->inter_digestsize;
1135	ctx->drvdata = cc_alg->drvdata;
1136	ctx->hash_len = cc_get_hash_len(tfm);
1137	return cc_alloc_ctx(ctx);
1138}
1139
1140static void cc_cra_exit(struct crypto_tfm *tfm)
1141{
1142	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1143	struct device *dev = drvdata_to_dev(ctx->drvdata);
1144
1145	dev_dbg(dev, "cc_cra_exit\n");
1146	cc_free_ctx(ctx);
1147}
1148
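/*
 * Descriptive note (added): cc_mac_update() is the .update handler for the
 * AES-XCBC/CMAC ahash transforms. Updates too small to need a hardware pass
 * are only buffered; otherwise a key/state setup plus data-processing
 * descriptor sequence is sent, and the running MAC state is written back to
 * the request-context digest buffer.
 */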
1149static int cc_mac_update(struct ahash_request *req)
1150{
1151	struct ahash_req_ctx *state = ahash_request_ctx(req);
1152	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1153	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1154	struct device *dev = drvdata_to_dev(ctx->drvdata);
1155	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1156	struct cc_crypto_req cc_req = {};
1157	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1158	int rc;
1159	u32 idx = 0;
1160	gfp_t flags = cc_gfp_flags(&req->base);
1161
1162	if (req->nbytes == 0) {
1163		/* no real updates required */
1164		return 0;
1165	}
1166
1167	state->xcbc_count++;
1168
1169	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1170					req->nbytes, block_size, flags);
1171	if (rc) {
1172		if (rc == 1) {
1173			dev_dbg(dev, "data size does not require HW update %x\n",
1174				req->nbytes);
1175			/* No hardware updates are required */
1176			return 0;
1177		}
1178		dev_err(dev, "map_ahash_request_update() failed\n");
1179		return -ENOMEM;
1180	}
1181
1182	if (cc_map_req(dev, state, ctx)) {
1183		dev_err(dev, "map_ahash_source() failed\n");
1184		return -EINVAL;
1185	}
1186
1187	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1188		cc_setup_xcbc(req, desc, &idx);
1189	else
1190		cc_setup_cmac(req, desc, &idx);
1191
1192	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1193
1194	/* store the hash digest result in context */
1195	hw_desc_init(&desc[idx]);
1196	set_cipher_mode(&desc[idx], ctx->hw_mode);
1197	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1198		      ctx->inter_digestsize, NS_BIT, 1);
1199	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1200	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1201	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1202	idx++;
1203
1204	/* Setup request structure */
1205	cc_req.user_cb = (void *)cc_update_complete;
1206	cc_req.user_arg = (void *)req;
1207
1208	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1209	if (rc != -EINPROGRESS && rc != -EBUSY) {
1210		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1211		cc_unmap_hash_request(dev, state, req->src, true);
1212		cc_unmap_req(dev, state, ctx);
1213	}
1214	return rc;
1215}
1216
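/*
 * Descriptive note (added): cc_mac_final() is the .final handler for
 * AES-XCBC/CMAC. When previous updates consumed complete blocks only
 * (rem_cnt == 0), the stored MAC state is first ECB-decrypted so the
 * hardware can re-process the last block as the final one; a request that
 * never saw data uses the dedicated "CMAC size 0" descriptor. The result is
 * written to the DMA-mapped request result buffer.
 */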
1217static int cc_mac_final(struct ahash_request *req)
1218{
1219	struct ahash_req_ctx *state = ahash_request_ctx(req);
1220	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1221	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1222	struct device *dev = drvdata_to_dev(ctx->drvdata);
1223	struct cc_crypto_req cc_req = {};
1224	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1225	int idx = 0;
1226	int rc = 0;
1227	u32 key_size, key_len;
1228	u32 digestsize = crypto_ahash_digestsize(tfm);
1229	gfp_t flags = cc_gfp_flags(&req->base);
1230	u32 rem_cnt = *cc_hash_buf_cnt(state);
1231
1232	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1233		key_size = CC_AES_128_BIT_KEY_SIZE;
1234		key_len  = CC_AES_128_BIT_KEY_SIZE;
1235	} else {
1236		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1237			ctx->key_params.keylen;
1238		key_len =  ctx->key_params.keylen;
1239	}
1240
1241	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
1242
1243	if (cc_map_req(dev, state, ctx)) {
1244		dev_err(dev, "map_ahash_source() failed\n");
1245		return -EINVAL;
1246	}
1247
1248	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1249				      req->nbytes, 0, flags)) {
1250		dev_err(dev, "map_ahash_request_final() failed\n");
1251		cc_unmap_req(dev, state, ctx);
1252		return -ENOMEM;
1253	}
1254
1255	if (cc_map_result(dev, state, digestsize)) {
1256		dev_err(dev, "map_ahash_digest() failed\n");
1257		cc_unmap_hash_request(dev, state, req->src, true);
1258		cc_unmap_req(dev, state, ctx);
1259		return -ENOMEM;
1260	}
1261
1262	/* Setup request structure */
1263	cc_req.user_cb = (void *)cc_hash_complete;
1264	cc_req.user_arg = (void *)req;
1265
1266	if (state->xcbc_count && rem_cnt == 0) {
1267		/* Load key for ECB decryption */
1268		hw_desc_init(&desc[idx]);
1269		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1270		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1271		set_din_type(&desc[idx], DMA_DLLI,
1272			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
1273			     key_size, NS_BIT);
1274		set_key_size_aes(&desc[idx], key_len);
1275		set_flow_mode(&desc[idx], S_DIN_to_AES);
1276		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1277		idx++;
1278
1279		/* Initiate decryption of block state to previous
1280		 * block_state-XOR-M[n]
1281		 */
1282		hw_desc_init(&desc[idx]);
1283		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1284			     CC_AES_BLOCK_SIZE, NS_BIT);
1285		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1286			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
1287		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1288		idx++;
1289
1290		/* Memory Barrier: wait for axi write to complete */
1291		hw_desc_init(&desc[idx]);
1292		set_din_no_dma(&desc[idx], 0, 0xfffff0);
1293		set_dout_no_dma(&desc[idx], 0, 0, 1);
1294		idx++;
1295	}
1296
1297	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1298		cc_setup_xcbc(req, desc, &idx);
1299	else
1300		cc_setup_cmac(req, desc, &idx);
1301
1302	if (state->xcbc_count == 0) {
1303		hw_desc_init(&desc[idx]);
1304		set_cipher_mode(&desc[idx], ctx->hw_mode);
1305		set_key_size_aes(&desc[idx], key_len);
1306		set_cmac_size0_mode(&desc[idx]);
1307		set_flow_mode(&desc[idx], S_DIN_to_AES);
1308		idx++;
1309	} else if (rem_cnt > 0) {
1310		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1311	} else {
1312		hw_desc_init(&desc[idx]);
1313		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1314		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1315		idx++;
1316	}
1317
1318	/* Get final MAC result */
1319	hw_desc_init(&desc[idx]);
1320	/* TODO */
1321	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1322		      digestsize, NS_BIT, 1);
1323	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1324	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1325	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1326	set_cipher_mode(&desc[idx], ctx->hw_mode);
1327	idx++;
1328
1329	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1330	if (rc != -EINPROGRESS && rc != -EBUSY) {
1331		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1332		cc_unmap_hash_request(dev, state, req->src, true);
1333		cc_unmap_result(dev, state, digestsize, req->result);
1334		cc_unmap_req(dev, state, ctx);
1335	}
1336	return rc;
1337}
1338
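/*
 * Descriptive note (added): cc_mac_finup() is the .finup handler for
 * AES-XCBC/CMAC. It falls back to cc_mac_final() when there is buffered
 * state but no new data; otherwise it processes the remaining data and
 * emits the final MAC in one descriptor sequence, using the "CMAC size 0"
 * descriptor for a zero-length message.
 */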
1339static int cc_mac_finup(struct ahash_request *req)
1340{
1341	struct ahash_req_ctx *state = ahash_request_ctx(req);
1342	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1343	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1344	struct device *dev = drvdata_to_dev(ctx->drvdata);
1345	struct cc_crypto_req cc_req = {};
1346	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1347	int idx = 0;
1348	int rc = 0;
1349	u32 key_len = 0;
1350	u32 digestsize = crypto_ahash_digestsize(tfm);
1351	gfp_t flags = cc_gfp_flags(&req->base);
1352
1353	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1354	if (state->xcbc_count > 0 && req->nbytes == 0) {
1355		dev_dbg(dev, "No data to update. Calling cc_mac_final\n");
1356		return cc_mac_final(req);
1357	}
1358
1359	if (cc_map_req(dev, state, ctx)) {
1360		dev_err(dev, "map_ahash_source() failed\n");
1361		return -EINVAL;
1362	}
1363
1364	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1365				      req->nbytes, 1, flags)) {
1366		dev_err(dev, "map_ahash_request_final() failed\n");
1367		cc_unmap_req(dev, state, ctx);
1368		return -ENOMEM;
1369	}
1370	if (cc_map_result(dev, state, digestsize)) {
1371		dev_err(dev, "map_ahash_digest() failed\n");
1372		cc_unmap_hash_request(dev, state, req->src, true);
1373		cc_unmap_req(dev, state, ctx);
1374		return -ENOMEM;
1375	}
1376
1377	/* Setup request structure */
1378	cc_req.user_cb = (void *)cc_hash_complete;
1379	cc_req.user_arg = (void *)req;
1380
1381	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1382		key_len = CC_AES_128_BIT_KEY_SIZE;
1383		cc_setup_xcbc(req, desc, &idx);
1384	} else {
1385		key_len = ctx->key_params.keylen;
1386		cc_setup_cmac(req, desc, &idx);
1387	}
1388
1389	if (req->nbytes == 0) {
1390		hw_desc_init(&desc[idx]);
1391		set_cipher_mode(&desc[idx], ctx->hw_mode);
1392		set_key_size_aes(&desc[idx], key_len);
1393		set_cmac_size0_mode(&desc[idx]);
1394		set_flow_mode(&desc[idx], S_DIN_to_AES);
1395		idx++;
1396	} else {
1397		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1398	}
1399
1400	/* Get final MAC result */
1401	hw_desc_init(&desc[idx]);
1402	/* TODO */
1403	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1404		      digestsize, NS_BIT, 1);
1405	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1406	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1407	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1408	set_cipher_mode(&desc[idx], ctx->hw_mode);
1409	idx++;
1410
1411	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1412	if (rc != -EINPROGRESS && rc != -EBUSY) {
1413		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1414		cc_unmap_hash_request(dev, state, req->src, true);
1415		cc_unmap_result(dev, state, digestsize, req->result);
1416		cc_unmap_req(dev, state, ctx);
1417	}
1418	return rc;
1419}
1420
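/*
 * Descriptive note (added): cc_mac_digest() is the one-shot .digest handler
 * for AES-XCBC/CMAC: it initializes the request state, maps the source and
 * result buffers, and runs the setup, data and result-read descriptors in a
 * single request.
 */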
1421static int cc_mac_digest(struct ahash_request *req)
1422{
1423	struct ahash_req_ctx *state = ahash_request_ctx(req);
1424	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1425	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1426	struct device *dev = drvdata_to_dev(ctx->drvdata);
1427	u32 digestsize = crypto_ahash_digestsize(tfm);
1428	struct cc_crypto_req cc_req = {};
1429	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1430	u32 key_len;
1431	unsigned int idx = 0;
1432	int rc;
1433	gfp_t flags = cc_gfp_flags(&req->base);
1434
1435	dev_dbg(dev, "===== mac digest (%d) ====\n", req->nbytes);
1436
1437	cc_init_req(dev, state, ctx);
1438
1439	if (cc_map_req(dev, state, ctx)) {
1440		dev_err(dev, "map_ahash_source() failed\n");
1441		return -ENOMEM;
1442	}
1443	if (cc_map_result(dev, state, digestsize)) {
1444		dev_err(dev, "map_ahash_digest() failed\n");
1445		cc_unmap_req(dev, state, ctx);
1446		return -ENOMEM;
1447	}
1448
1449	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1450				      req->nbytes, 1, flags)) {
1451		dev_err(dev, "map_ahash_request_final() failed\n");
1452		cc_unmap_req(dev, state, ctx);
1453		return -ENOMEM;
1454	}
1455
1456	/* Setup request structure */
1457	cc_req.user_cb = (void *)cc_digest_complete;
1458	cc_req.user_arg = (void *)req;
1459
1460	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1461		key_len = CC_AES_128_BIT_KEY_SIZE;
1462		cc_setup_xcbc(req, desc, &idx);
1463	} else {
1464		key_len = ctx->key_params.keylen;
1465		cc_setup_cmac(req, desc, &idx);
1466	}
1467
1468	if (req->nbytes == 0) {
1469		hw_desc_init(&desc[idx]);
1470		set_cipher_mode(&desc[idx], ctx->hw_mode);
1471		set_key_size_aes(&desc[idx], key_len);
1472		set_cmac_size0_mode(&desc[idx]);
1473		set_flow_mode(&desc[idx], S_DIN_to_AES);
1474		idx++;
1475	} else {
1476		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1477	}
1478
1479	/* Get final MAC result */
1480	hw_desc_init(&desc[idx]);
1481	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1482		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
1483	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1484	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1485	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1486	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1487	set_cipher_mode(&desc[idx], ctx->hw_mode);
1488	idx++;
1489
1490	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1491	if (rc != -EINPROGRESS && rc != -EBUSY) {
1492		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1493		cc_unmap_hash_request(dev, state, req->src, true);
1494		cc_unmap_result(dev, state, digestsize, req->result);
1495		cc_unmap_req(dev, state, ctx);
1496	}
1497	return rc;
1498}
1499
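/*
 * Descriptive note (added): cc_hash_export() serializes the request state as
 * <magic><intermediate digest><length counter><buffered byte count><buffer>
 * so it can later be restored by cc_hash_import().
 */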
1500static int cc_hash_export(struct ahash_request *req, void *out)
1501{
1502	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1503	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1504	struct ahash_req_ctx *state = ahash_request_ctx(req);
1505	u8 *curr_buff = cc_hash_buf(state);
1506	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1507	const u32 tmp = CC_EXPORT_MAGIC;
1508
1509	memcpy(out, &tmp, sizeof(u32));
1510	out += sizeof(u32);
1511
1512	memcpy(out, state->digest_buff, ctx->inter_digestsize);
1513	out += ctx->inter_digestsize;
1514
1515	memcpy(out, state->digest_bytes_len, ctx->hash_len);
1516	out += ctx->hash_len;
1517
1518	memcpy(out, &curr_buff_cnt, sizeof(u32));
1519	out += sizeof(u32);
1520
1521	memcpy(out, curr_buff, curr_buff_cnt);
1522
1523	return 0;
1524}
1525
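/*
 * Descriptive note (added): cc_hash_import() restores a state produced by
 * cc_hash_export(), rejecting blobs with a wrong magic value or a buffered
 * byte count larger than one hash block.
 */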
1526static int cc_hash_import(struct ahash_request *req, const void *in)
1527{
1528	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1529	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1530	struct device *dev = drvdata_to_dev(ctx->drvdata);
1531	struct ahash_req_ctx *state = ahash_request_ctx(req);
1532	u32 tmp;
1533
1534	memcpy(&tmp, in, sizeof(u32));
1535	if (tmp != CC_EXPORT_MAGIC)
1536		return -EINVAL;
1537	in += sizeof(u32);
1538
1539	cc_init_req(dev, state, ctx);
1540
1541	memcpy(state->digest_buff, in, ctx->inter_digestsize);
1542	in += ctx->inter_digestsize;
1543
1544	memcpy(state->digest_bytes_len, in, ctx->hash_len);
1545	in += ctx->hash_len;
1546
1547	/* Sanity check the data as much as possible */
1548	memcpy(&tmp, in, sizeof(u32));
1549	if (tmp > CC_MAX_HASH_BLCK_SIZE)
1550		return -EINVAL;
1551	in += sizeof(u32);
1552
1553	state->buf_cnt[0] = tmp;
1554	memcpy(state->buffers[0], in, tmp);
1555
1556	return 0;
1557}
1558
1559struct cc_hash_template {
1560	char name[CRYPTO_MAX_ALG_NAME];
1561	char driver_name[CRYPTO_MAX_ALG_NAME];
1562	char mac_name[CRYPTO_MAX_ALG_NAME];
1563	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1564	unsigned int blocksize;
1565	bool is_mac;
1566	bool synchronize;
1567	struct ahash_alg template_ahash;
1568	int hash_mode;
1569	int hw_mode;
1570	int inter_digestsize;
1571	struct cc_drvdata *drvdata;
1572	u32 min_hw_rev;
1573	enum cc_std_body std_body;
1574};
1575
1576#define CC_STATE_SIZE(_x) \
1577	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
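/*
 * Descriptive note (added): exported state size for a digest of size _x:
 * intermediate digest + length counter + one block of buffered data + two
 * u32 fields (export magic and buffered byte count), matching the layout
 * written by cc_hash_export().
 */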
1578
1579/* hash descriptors */
1580static struct cc_hash_template driver_hash[] = {
1581	/* Asynchronous hash templates */
1582	{
1583		.name = "sha1",
1584		.driver_name = "sha1-ccree",
1585		.mac_name = "hmac(sha1)",
1586		.mac_driver_name = "hmac-sha1-ccree",
1587		.blocksize = SHA1_BLOCK_SIZE,
1588		.is_mac = true,
1589		.synchronize = false,
1590		.template_ahash = {
1591			.init = cc_hash_init,
1592			.update = cc_hash_update,
1593			.final = cc_hash_final,
1594			.finup = cc_hash_finup,
1595			.digest = cc_hash_digest,
1596			.export = cc_hash_export,
1597			.import = cc_hash_import,
1598			.setkey = cc_hash_setkey,
1599			.halg = {
1600				.digestsize = SHA1_DIGEST_SIZE,
1601				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1602			},
1603		},
1604		.hash_mode = DRV_HASH_SHA1,
1605		.hw_mode = DRV_HASH_HW_SHA1,
1606		.inter_digestsize = SHA1_DIGEST_SIZE,
1607		.min_hw_rev = CC_HW_REV_630,
1608		.std_body = CC_STD_NIST,
1609	},
1610	{
1611		.name = "sha256",
1612		.driver_name = "sha256-ccree",
1613		.mac_name = "hmac(sha256)",
1614		.mac_driver_name = "hmac-sha256-ccree",
1615		.blocksize = SHA256_BLOCK_SIZE,
1616		.is_mac = true,
1617		.template_ahash = {
1618			.init = cc_hash_init,
1619			.update = cc_hash_update,
1620			.final = cc_hash_final,
1621			.finup = cc_hash_finup,
1622			.digest = cc_hash_digest,
1623			.export = cc_hash_export,
1624			.import = cc_hash_import,
1625			.setkey = cc_hash_setkey,
1626			.halg = {
1627				.digestsize = SHA256_DIGEST_SIZE,
1628				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1629			},
1630		},
1631		.hash_mode = DRV_HASH_SHA256,
1632		.hw_mode = DRV_HASH_HW_SHA256,
1633		.inter_digestsize = SHA256_DIGEST_SIZE,
1634		.min_hw_rev = CC_HW_REV_630,
1635		.std_body = CC_STD_NIST,
1636	},
1637	{
1638		.name = "sha224",
1639		.driver_name = "sha224-ccree",
1640		.mac_name = "hmac(sha224)",
1641		.mac_driver_name = "hmac-sha224-ccree",
1642		.blocksize = SHA224_BLOCK_SIZE,
1643		.is_mac = true,
1644		.template_ahash = {
1645			.init = cc_hash_init,
1646			.update = cc_hash_update,
1647			.final = cc_hash_final,
1648			.finup = cc_hash_finup,
1649			.digest = cc_hash_digest,
1650			.export = cc_hash_export,
1651			.import = cc_hash_import,
1652			.setkey = cc_hash_setkey,
1653			.halg = {
1654				.digestsize = SHA224_DIGEST_SIZE,
1655				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
1656			},
1657		},
1658		.hash_mode = DRV_HASH_SHA224,
1659		.hw_mode = DRV_HASH_HW_SHA256,
1660		.inter_digestsize = SHA256_DIGEST_SIZE,
1661		.min_hw_rev = CC_HW_REV_630,
1662		.std_body = CC_STD_NIST,
1663	},
1664	{
1665		.name = "sha384",
1666		.driver_name = "sha384-ccree",
1667		.mac_name = "hmac(sha384)",
1668		.mac_driver_name = "hmac-sha384-ccree",
1669		.blocksize = SHA384_BLOCK_SIZE,
1670		.is_mac = true,
1671		.template_ahash = {
1672			.init = cc_hash_init,
1673			.update = cc_hash_update,
1674			.final = cc_hash_final,
1675			.finup = cc_hash_finup,
1676			.digest = cc_hash_digest,
1677			.export = cc_hash_export,
1678			.import = cc_hash_import,
1679			.setkey = cc_hash_setkey,
1680			.halg = {
1681				.digestsize = SHA384_DIGEST_SIZE,
1682				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1683			},
1684		},
1685		.hash_mode = DRV_HASH_SHA384,
1686		.hw_mode = DRV_HASH_HW_SHA512,
1687		.inter_digestsize = SHA512_DIGEST_SIZE,
1688		.min_hw_rev = CC_HW_REV_712,
1689		.std_body = CC_STD_NIST,
1690	},
1691	{
1692		.name = "sha512",
1693		.driver_name = "sha512-ccree",
1694		.mac_name = "hmac(sha512)",
1695		.mac_driver_name = "hmac-sha512-ccree",
1696		.blocksize = SHA512_BLOCK_SIZE,
1697		.is_mac = true,
1698		.template_ahash = {
1699			.init = cc_hash_init,
1700			.update = cc_hash_update,
1701			.final = cc_hash_final,
1702			.finup = cc_hash_finup,
1703			.digest = cc_hash_digest,
1704			.export = cc_hash_export,
1705			.import = cc_hash_import,
1706			.setkey = cc_hash_setkey,
1707			.halg = {
1708				.digestsize = SHA512_DIGEST_SIZE,
1709				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1710			},
1711		},
1712		.hash_mode = DRV_HASH_SHA512,
1713		.hw_mode = DRV_HASH_HW_SHA512,
1714		.inter_digestsize = SHA512_DIGEST_SIZE,
1715		.min_hw_rev = CC_HW_REV_712,
1716		.std_body = CC_STD_NIST,
1717	},
1718	{
1719		.name = "md5",
1720		.driver_name = "md5-ccree",
1721		.mac_name = "hmac(md5)",
1722		.mac_driver_name = "hmac-md5-ccree",
1723		.blocksize = MD5_HMAC_BLOCK_SIZE,
1724		.is_mac = true,
1725		.template_ahash = {
1726			.init = cc_hash_init,
1727			.update = cc_hash_update,
1728			.final = cc_hash_final,
1729			.finup = cc_hash_finup,
1730			.digest = cc_hash_digest,
1731			.export = cc_hash_export,
1732			.import = cc_hash_import,
1733			.setkey = cc_hash_setkey,
1734			.halg = {
1735				.digestsize = MD5_DIGEST_SIZE,
1736				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1737			},
1738		},
1739		.hash_mode = DRV_HASH_MD5,
1740		.hw_mode = DRV_HASH_HW_MD5,
1741		.inter_digestsize = MD5_DIGEST_SIZE,
1742		.min_hw_rev = CC_HW_REV_630,
1743		.std_body = CC_STD_NIST,
1744	},
1745	{
1746		.name = "sm3",
1747		.driver_name = "sm3-ccree",
1748		.blocksize = SM3_BLOCK_SIZE,
1749		.is_mac = false,
1750		.template_ahash = {
1751			.init = cc_hash_init,
1752			.update = cc_hash_update,
1753			.final = cc_hash_final,
1754			.finup = cc_hash_finup,
1755			.digest = cc_hash_digest,
1756			.export = cc_hash_export,
1757			.import = cc_hash_import,
1758			.setkey = cc_hash_setkey,
1759			.halg = {
1760				.digestsize = SM3_DIGEST_SIZE,
1761				.statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
1762			},
1763		},
1764		.hash_mode = DRV_HASH_SM3,
1765		.hw_mode = DRV_HASH_HW_SM3,
1766		.inter_digestsize = SM3_DIGEST_SIZE,
1767		.min_hw_rev = CC_HW_REV_713,
1768		.std_body = CC_STD_OSCCA,
1769	},
1770	{
1771		.mac_name = "xcbc(aes)",
1772		.mac_driver_name = "xcbc-aes-ccree",
1773		.blocksize = AES_BLOCK_SIZE,
1774		.is_mac = true,
1775		.template_ahash = {
1776			.init = cc_hash_init,
1777			.update = cc_mac_update,
1778			.final = cc_mac_final,
1779			.finup = cc_mac_finup,
1780			.digest = cc_mac_digest,
1781			.setkey = cc_xcbc_setkey,
1782			.export = cc_hash_export,
1783			.import = cc_hash_import,
1784			.halg = {
1785				.digestsize = AES_BLOCK_SIZE,
1786				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1787			},
1788		},
1789		.hash_mode = DRV_HASH_NULL,
1790		.hw_mode = DRV_CIPHER_XCBC_MAC,
1791		.inter_digestsize = AES_BLOCK_SIZE,
1792		.min_hw_rev = CC_HW_REV_630,
1793		.std_body = CC_STD_NIST,
1794	},
1795	{
1796		.mac_name = "cmac(aes)",
1797		.mac_driver_name = "cmac-aes-ccree",
1798		.blocksize = AES_BLOCK_SIZE,
1799		.is_mac = true,
1800		.template_ahash = {
1801			.init = cc_hash_init,
1802			.update = cc_mac_update,
1803			.final = cc_mac_final,
1804			.finup = cc_mac_finup,
1805			.digest = cc_mac_digest,
1806			.setkey = cc_cmac_setkey,
1807			.export = cc_hash_export,
1808			.import = cc_hash_import,
1809			.halg = {
1810				.digestsize = AES_BLOCK_SIZE,
1811				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1812			},
1813		},
1814		.hash_mode = DRV_HASH_NULL,
1815		.hw_mode = DRV_CIPHER_CMAC,
1816		.inter_digestsize = AES_BLOCK_SIZE,
1817		.min_hw_rev = CC_HW_REV_630,
1818		.std_body = CC_STD_NIST,
1819	},
1820};
1821
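/*
 * Descriptive note (added): cc_alloc_hash_alg() builds a cc_hash_alg
 * instance from a template entry. With keyed=true the hmac/mac names are
 * used; otherwise the plain hash names are used and .setkey is cleared so
 * the crypto core treats the algorithm as an unkeyed hash.
 */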
1822static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1823					     struct device *dev, bool keyed)
1824{
1825	struct cc_hash_alg *t_crypto_alg;
1826	struct crypto_alg *alg;
1827	struct ahash_alg *halg;
1828
1829	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
1830	if (!t_crypto_alg)
1831		return ERR_PTR(-ENOMEM);
1832
1833	t_crypto_alg->ahash_alg = template->template_ahash;
1834	halg = &t_crypto_alg->ahash_alg;
1835	alg = &halg->halg.base;
1836
1837	if (keyed) {
1838		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1839			 template->mac_name);
1840		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1841			 template->mac_driver_name);
1842	} else {
1843		halg->setkey = NULL;
1844		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1845			 template->name);
1846		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1847			 template->driver_name);
1848	}
1849	alg->cra_module = THIS_MODULE;
1850	alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1851	alg->cra_priority = CC_CRA_PRIO;
1852	alg->cra_blocksize = template->blocksize;
1853	alg->cra_alignmask = 0;
1854	alg->cra_exit = cc_cra_exit;
1855
1856	alg->cra_init = cc_cra_init;
1857	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
1858
1859	t_crypto_alg->hash_mode = template->hash_mode;
1860	t_crypto_alg->hw_mode = template->hw_mode;
1861	t_crypto_alg->inter_digestsize = template->inter_digestsize;
1862
1863	return t_crypto_alg;
1864}
1865
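/*
 * Descriptive note (added): cc_init_hash_sram() copies the digest-length
 * constants and the larval (initial) digests into SRAM, in the same order
 * assumed by cc_larval_digest_addr(): MD5, SHA-1, SHA-224, SHA-256, then
 * SM3 and the SHA-384/SHA-512 values on hardware revisions that support
 * them.
 */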
1866int cc_init_hash_sram(struct cc_drvdata *drvdata)
1867{
1868	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1869	cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
1870	unsigned int larval_seq_len = 0;
1871	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1872	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1873	bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
1874	int rc = 0;
1875
1876	/* Copy-to-sram digest-len */
1877	cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs,
1878			 ARRAY_SIZE(cc_digest_len_init), larval_seq,
1879			 &larval_seq_len);
1880	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1881	if (rc)
1882		goto init_digest_const_err;
1883
1884	sram_buff_ofs += sizeof(cc_digest_len_init);
1885	larval_seq_len = 0;
1886
1887	if (large_sha_supported) {
1888		/* Copy-to-sram digest-len for sha384/512 */
1889		cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs,
1890				 ARRAY_SIZE(cc_digest_len_sha512_init),
1891				 larval_seq, &larval_seq_len);
1892		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1893		if (rc)
1894			goto init_digest_const_err;
1895
1896		sram_buff_ofs += sizeof(cc_digest_len_sha512_init);
1897		larval_seq_len = 0;
1898	}
1899
1900	/* The initial digests offset */
1901	hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1902
1903	/* Copy-to-sram initial SHA* digests */
1904	cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init),
1905			 larval_seq, &larval_seq_len);
1906	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1907	if (rc)
1908		goto init_digest_const_err;
1909	sram_buff_ofs += sizeof(cc_md5_init);
1910	larval_seq_len = 0;
1911
1912	cc_set_sram_desc(cc_sha1_init, sram_buff_ofs,
1913			 ARRAY_SIZE(cc_sha1_init), larval_seq,
1914			 &larval_seq_len);
1915	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1916	if (rc)
1917		goto init_digest_const_err;
1918	sram_buff_ofs += sizeof(cc_sha1_init);
1919	larval_seq_len = 0;
1920
1921	cc_set_sram_desc(cc_sha224_init, sram_buff_ofs,
1922			 ARRAY_SIZE(cc_sha224_init), larval_seq,
1923			 &larval_seq_len);
1924	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1925	if (rc)
1926		goto init_digest_const_err;
1927	sram_buff_ofs += sizeof(cc_sha224_init);
1928	larval_seq_len = 0;
1929
1930	cc_set_sram_desc(cc_sha256_init, sram_buff_ofs,
1931			 ARRAY_SIZE(cc_sha256_init), larval_seq,
1932			 &larval_seq_len);
1933	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1934	if (rc)
1935		goto init_digest_const_err;
1936	sram_buff_ofs += sizeof(cc_sha256_init);
1937	larval_seq_len = 0;
1938
1939	if (sm3_supported) {
1940		cc_set_sram_desc(cc_sm3_init, sram_buff_ofs,
1941				 ARRAY_SIZE(cc_sm3_init), larval_seq,
1942				 &larval_seq_len);
1943		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1944		if (rc)
1945			goto init_digest_const_err;
1946		sram_buff_ofs += sizeof(cc_sm3_init);
1947		larval_seq_len = 0;
1948	}
1949
1950	if (large_sha_supported) {
1951		cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
1952				 (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
1953				 &larval_seq_len);
1954		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1955		if (rc)
1956			goto init_digest_const_err;
1957		sram_buff_ofs += sizeof(cc_sha384_init);
1958		larval_seq_len = 0;
1959
1960		cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
1961				 (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
1962				 &larval_seq_len);
1963		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1964		if (rc)
1965			goto init_digest_const_err;
1966	}
1967
1968init_digest_const_err:
1969	return rc;
1970}
1971
1972static void __init cc_swap_dwords(u32 *buf, unsigned long size)
1973{
1974	int i;
1975	u32 tmp;
1976
1977	for (i = 0; i < size; i += 2) {
1978		tmp = buf[i];
1979		buf[i] = buf[i + 1];
1980		buf[i + 1] = tmp;
1981	}
1982}
1983
1984/*
1985 * Due to the way the HW works we need to swap every
1986 * double word in the SHA384 and SHA512 larval hashes
1987 */
1988void __init cc_hash_global_init(void)
1989{
1990	cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
1991	cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
1992}
1993
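/*
 * Descriptive note (added): cc_hash_alloc() allocates the SRAM area for the
 * constants, programs it via cc_init_hash_sram() and registers every
 * template that matches the device's HW revision and standard bodies;
 * MAC-only templates (xcbc/cmac) skip the unkeyed hash registration.
 */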
1994int cc_hash_alloc(struct cc_drvdata *drvdata)
1995{
1996	struct cc_hash_handle *hash_handle;
1997	cc_sram_addr_t sram_buff;
1998	u32 sram_size_to_alloc;
1999	struct device *dev = drvdata_to_dev(drvdata);
2000	int rc = 0;
2001	int alg;
2002
2003	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
2004	if (!hash_handle)
2005		return -ENOMEM;
2006
2007	INIT_LIST_HEAD(&hash_handle->hash_list);
2008	drvdata->hash_handle = hash_handle;
2009
2010	sram_size_to_alloc = sizeof(cc_digest_len_init) +
2011			sizeof(cc_md5_init) +
2012			sizeof(cc_sha1_init) +
2013			sizeof(cc_sha224_init) +
2014			sizeof(cc_sha256_init);
2015
2016	if (drvdata->hw_rev >= CC_HW_REV_713)
2017		sram_size_to_alloc += sizeof(cc_sm3_init);
2018
2019	if (drvdata->hw_rev >= CC_HW_REV_712)
2020		sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
2021			sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
2022
2023	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
2024	if (sram_buff == NULL_SRAM_ADDR) {
2025		dev_err(dev, "SRAM pool exhausted\n");
2026		rc = -ENOMEM;
2027		goto fail;
2028	}
2029
2030	/* The initial digest-len offset */
2031	hash_handle->digest_len_sram_addr = sram_buff;
2032
2033	/* Must be set before the alg registration as it is used there */
2034	rc = cc_init_hash_sram(drvdata);
2035	if (rc) {
2036		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
2037		goto fail;
2038	}
2039
2040	/* ahash registration */
2041	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
2042		struct cc_hash_alg *t_alg;
2043		int hw_mode = driver_hash[alg].hw_mode;
2044
2045		/* Check that the HW revision and variants are suitable */
2046		if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
2047		    !(drvdata->std_bodies & driver_hash[alg].std_body))
2048			continue;
2049
2050		if (driver_hash[alg].is_mac) {
2051			/* register hmac version */
2052			t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
2053			if (IS_ERR(t_alg)) {
2054				rc = PTR_ERR(t_alg);
2055				dev_err(dev, "%s alg allocation failed\n",
2056					driver_hash[alg].driver_name);
2057				goto fail;
2058			}
2059			t_alg->drvdata = drvdata;
2060
2061			rc = crypto_register_ahash(&t_alg->ahash_alg);
2062			if (rc) {
2063				dev_err(dev, "%s alg registration failed\n",
2064					driver_hash[alg].driver_name);
2065				kfree(t_alg);
2066				goto fail;
2067			} else {
2068				list_add_tail(&t_alg->entry,
2069					      &hash_handle->hash_list);
2070			}
2071		}
2072		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2073		    hw_mode == DRV_CIPHER_CMAC)
2074			continue;
2075
2076		/* register hash version */
2077		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
2078		if (IS_ERR(t_alg)) {
2079			rc = PTR_ERR(t_alg);
2080			dev_err(dev, "%s alg allocation failed\n",
2081				driver_hash[alg].driver_name);
2082			goto fail;
2083		}
2084		t_alg->drvdata = drvdata;
2085
2086		rc = crypto_register_ahash(&t_alg->ahash_alg);
2087		if (rc) {
2088			dev_err(dev, "%s alg registration failed\n",
2089				driver_hash[alg].driver_name);
2090			kfree(t_alg);
2091			goto fail;
2092		} else {
2093			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2094		}
2095	}
2096
2097	return 0;
2098
2099fail:
2100	kfree(drvdata->hash_handle);
2101	drvdata->hash_handle = NULL;
2102	return rc;
2103}
2104
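/*
 * Descriptive note (added): cc_hash_free() unregisters and frees every
 * ahash registered by cc_hash_alloc().
 */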
2105int cc_hash_free(struct cc_drvdata *drvdata)
2106{
2107	struct cc_hash_alg *t_hash_alg, *hash_n;
2108	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2109
2110	if (hash_handle) {
2111		list_for_each_entry_safe(t_hash_alg, hash_n,
2112					 &hash_handle->hash_list, entry) {
2113			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2114			list_del(&t_hash_alg->entry);
2115			kfree(t_hash_alg);
2116		}
2117
2118		kfree(hash_handle);
2119		drvdata->hash_handle = NULL;
2120	}
2121	return 0;
2122}
2123
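/*
 * Descriptive note (added): cc_setup_xcbc() emits the setup descriptors
 * that load the three derived XCBC keys (K1 into the key register, K2/K3
 * into state registers) and the current MAC state, ahead of any data
 * descriptors.
 */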
2124static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2125			  unsigned int *seq_size)
2126{
2127	unsigned int idx = *seq_size;
2128	struct ahash_req_ctx *state = ahash_request_ctx(areq);
2129	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2130	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2131
2132	/* Setup XCBC MAC K1 */
2133	hw_desc_init(&desc[idx]);
2134	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2135					    XCBC_MAC_K1_OFFSET),
2136		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2137	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2138	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
2139	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2140	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2141	set_flow_mode(&desc[idx], S_DIN_to_AES);
2142	idx++;
2143
2144	/* Setup XCBC MAC K2 */
2145	hw_desc_init(&desc[idx]);
2146	set_din_type(&desc[idx], DMA_DLLI,
2147		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2148		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2149	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2150	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2151	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2152	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2153	set_flow_mode(&desc[idx], S_DIN_to_AES);
2154	idx++;
2155
2156	/* Setup XCBC MAC K3 */
2157	hw_desc_init(&desc[idx]);
2158	set_din_type(&desc[idx], DMA_DLLI,
2159		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2160		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2161	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2162	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2163	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2164	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2165	set_flow_mode(&desc[idx], S_DIN_to_AES);
2166	idx++;
2167
2168	/* Loading MAC state */
2169	hw_desc_init(&desc[idx]);
2170	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2171		     CC_AES_BLOCK_SIZE, NS_BIT);
2172	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2173	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2174	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2175	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2176	set_flow_mode(&desc[idx], S_DIN_to_AES);
2177	idx++;
2178	*seq_size = idx;
2179}
2180
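/*
 * Descriptive note (added): cc_setup_cmac() emits the setup descriptors
 * that load the user AES key (padded to the maximum key size for 192-bit
 * keys) and the current MAC state.
 */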
2181static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2182			  unsigned int *seq_size)
2183{
2184	unsigned int idx = *seq_size;
2185	struct ahash_req_ctx *state = ahash_request_ctx(areq);
2186	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2187	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2188
2189	/* Setup CMAC Key */
2190	hw_desc_init(&desc[idx]);
2191	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2192		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2193		      ctx->key_params.keylen), NS_BIT);
2194	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2195	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2196	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2197	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2198	set_flow_mode(&desc[idx], S_DIN_to_AES);
2199	idx++;
2200
2201	/* Load MAC state */
2202	hw_desc_init(&desc[idx]);
2203	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2204		     CC_AES_BLOCK_SIZE, NS_BIT);
2205	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2206	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2207	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2208	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2209	set_flow_mode(&desc[idx], S_DIN_to_AES);
2210	idx++;
2211	*seq_size = idx;
2212}
2213
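/*
 * Descriptive note (added): cc_set_desc() appends the data-processing
 * descriptors: a single DLLI descriptor for contiguous data, or a BYPASS
 * copy of the MLLI table into SRAM followed by an MLLI-driven descriptor
 * for scattered data; a NULL buffer type adds nothing.
 */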
2214static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2215			struct cc_hash_ctx *ctx, unsigned int flow_mode,
2216			struct cc_hw_desc desc[], bool is_not_last_data,
2217			unsigned int *seq_size)
2218{
2219	unsigned int idx = *seq_size;
2220	struct device *dev = drvdata_to_dev(ctx->drvdata);
2221
2222	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2223		hw_desc_init(&desc[idx]);
2224		set_din_type(&desc[idx], DMA_DLLI,
2225			     sg_dma_address(areq_ctx->curr_sg),
2226			     areq_ctx->curr_sg->length, NS_BIT);
2227		set_flow_mode(&desc[idx], flow_mode);
2228		idx++;
2229	} else {
2230		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2231			dev_dbg(dev, " NULL mode\n");
2232			/* nothing to build */
2233			return;
2234		}
2235		/* bypass */
2236		hw_desc_init(&desc[idx]);
2237		set_din_type(&desc[idx], DMA_DLLI,
2238			     areq_ctx->mlli_params.mlli_dma_addr,
2239			     areq_ctx->mlli_params.mlli_len, NS_BIT);
2240		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2241			      areq_ctx->mlli_params.mlli_len);
2242		set_flow_mode(&desc[idx], BYPASS);
2243		idx++;
2244		/* process */
2245		hw_desc_init(&desc[idx]);
2246		set_din_type(&desc[idx], DMA_MLLI,
2247			     ctx->drvdata->mlli_sram_addr,
2248			     areq_ctx->mlli_nents, NS_BIT);
2249		set_flow_mode(&desc[idx], flow_mode);
2250		idx++;
2251	}
2252	if (is_not_last_data)
2253		set_din_not_last_indication(&desc[(idx - 1)]);
2254	/* return updated desc sequence size */
2255	*seq_size = idx;
2256}
2257
2258static const void *cc_larval_digest(struct device *dev, u32 mode)
2259{
2260	switch (mode) {
2261	case DRV_HASH_MD5:
2262		return cc_md5_init;
2263	case DRV_HASH_SHA1:
2264		return cc_sha1_init;
2265	case DRV_HASH_SHA224:
2266		return cc_sha224_init;
2267	case DRV_HASH_SHA256:
2268		return cc_sha256_init;
2269	case DRV_HASH_SHA384:
2270		return cc_sha384_init;
2271	case DRV_HASH_SHA512:
2272		return cc_sha512_init;
2273	case DRV_HASH_SM3:
2274		return cc_sm3_init;
2275	default:
2276		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2277		return cc_md5_init;
2278	}
2279}
2280
2281/*!
2282 * Gets the address of the initial digest in SRAM
2283 * according to the given hash mode
2284 *
2285 * \param drvdata
2286 * \param mode The hash mode: MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
2287 *
2288 * \return cc_sram_addr_t The address of the initial digest in SRAM
2289 */
2290cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
2291{
2292	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2293	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2294	struct device *dev = drvdata_to_dev(_drvdata);
2295	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
2296	cc_sram_addr_t addr;
2297
2298	switch (mode) {
2299	case DRV_HASH_NULL:
2300		break; /*Ignore*/
2301	case DRV_HASH_MD5:
2302		return (hash_handle->larval_digest_sram_addr);
2303	case DRV_HASH_SHA1:
2304		return (hash_handle->larval_digest_sram_addr +
2305			sizeof(cc_md5_init));
2306	case DRV_HASH_SHA224:
2307		return (hash_handle->larval_digest_sram_addr +
2308			sizeof(cc_md5_init) +
2309			sizeof(cc_sha1_init));
2310	case DRV_HASH_SHA256:
2311		return (hash_handle->larval_digest_sram_addr +
2312			sizeof(cc_md5_init) +
2313			sizeof(cc_sha1_init) +
2314			sizeof(cc_sha224_init));
2315	case DRV_HASH_SM3:
2316		return (hash_handle->larval_digest_sram_addr +
2317			sizeof(cc_md5_init) +
2318			sizeof(cc_sha1_init) +
2319			sizeof(cc_sha224_init) +
2320			sizeof(cc_sha256_init));
2321	case DRV_HASH_SHA384:
2322		addr = (hash_handle->larval_digest_sram_addr +
2323			sizeof(cc_md5_init) +
2324			sizeof(cc_sha1_init) +
2325			sizeof(cc_sha224_init) +
2326			sizeof(cc_sha256_init));
2327		if (sm3_supported)
2328			addr += sizeof(cc_sm3_init);
2329		return addr;
2330	case DRV_HASH_SHA512:
2331		addr = (hash_handle->larval_digest_sram_addr +
2332			sizeof(cc_md5_init) +
2333			sizeof(cc_sha1_init) +
2334			sizeof(cc_sha224_init) +
2335			sizeof(cc_sha256_init) +
2336			sizeof(cc_sha384_init));
2337		if (sm3_supported)
2338			addr += sizeof(cc_sm3_init);
2339		return addr;
2340	default:
2341		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2342	}
2343
2344	/* Deliberately wrong but valid address, returned to avoid a kernel crash */
2345	return hash_handle->larval_digest_sram_addr;
2346}
2347
2348cc_sram_addr_t
2349cc_digest_len_addr(void *drvdata, u32 mode)
2350{
2351	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2352	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2353	cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2354
2355	switch (mode) {
2356	case DRV_HASH_SHA1:
2357	case DRV_HASH_SHA224:
2358	case DRV_HASH_SHA256:
2359	case DRV_HASH_MD5:
2360		return digest_len_addr;
2361#if (CC_DEV_SHA_MAX > 256)
2362	case DRV_HASH_SHA384:
2363	case DRV_HASH_SHA512:
2364		return  digest_len_addr + sizeof(cc_digest_len_init);
2365#endif
2366	default:
2367		return digest_len_addr; /* to avoid a kernel crash */
2368	}
2369}