   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
   3
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <crypto/algapi.h>
   7#include <crypto/hash.h>
   8#include <crypto/md5.h>
   9#include <crypto/sm3.h>
  10#include <crypto/internal/hash.h>
  11
  12#include "cc_driver.h"
  13#include "cc_request_mgr.h"
  14#include "cc_buffer_mgr.h"
  15#include "cc_hash.h"
  16#include "cc_sram_mgr.h"
  17
  18#define CC_MAX_HASH_SEQ_LEN 12
  19#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
  20#define CC_SM3_HASH_LEN_SIZE 8
  21
  22struct cc_hash_handle {
   23	u32 digest_len_sram_addr;	/* const value in SRAM */
  24	u32 larval_digest_sram_addr;   /* const value in SRAM */
  25	struct list_head hash_list;
  26};
  27
  28static const u32 cc_digest_len_init[] = {
  29	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
  30static const u32 cc_md5_init[] = {
  31	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
  32static const u32 cc_sha1_init[] = {
  33	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
  34static const u32 cc_sha224_init[] = {
  35	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
  36	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
  37static const u32 cc_sha256_init[] = {
  38	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
  39	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
  40static const u32 cc_digest_len_sha512_init[] = {
  41	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
  42
  43/*
  44 * Due to the way the HW works, every double word in the SHA384 and SHA512
  45 * larval hashes must be stored in hi/lo order
  46 */
  47#define hilo(x)	upper_32_bits(x), lower_32_bits(x)
  48static const u32 cc_sha384_init[] = {
  49	hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
  50	hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
  51static const u32 cc_sha512_init[] = {
  52	hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
  53	hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
  54
  55static const u32 cc_sm3_init[] = {
  56	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
  57	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
  58
  59static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
  60			  unsigned int *seq_size);
  61
  62static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
  63			  unsigned int *seq_size);
  64
  65static const void *cc_larval_digest(struct device *dev, u32 mode);
  66
  67struct cc_hash_alg {
  68	struct list_head entry;
  69	int hash_mode;
  70	int hw_mode;
  71	int inter_digestsize;
  72	struct cc_drvdata *drvdata;
  73	struct ahash_alg ahash_alg;
  74};
  75
  76struct hash_key_req_ctx {
  77	u32 keylen;
  78	dma_addr_t key_dma_addr;
  79	u8 *key;
  80};
  81
  82/* hash per-session context */
  83struct cc_hash_ctx {
  84	struct cc_drvdata *drvdata;
   85	/* holds the original digest: the digest after "setkey" if HMAC,
   86	 * or the initial digest if HASH.
   87	 */
  88	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
  89	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE]  ____cacheline_aligned;
  90
  91	dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
  92	dma_addr_t digest_buff_dma_addr;
   93	/* used for HMAC with a key larger than the mode block size */
  94	struct hash_key_req_ctx key_params;
  95	int hash_mode;
  96	int hw_mode;
  97	int inter_digestsize;
  98	unsigned int hash_len;
  99	struct completion setkey_comp;
 100	bool is_hmac;
 101};
 102
 103static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
 104			unsigned int flow_mode, struct cc_hw_desc desc[],
 105			bool is_not_last_data, unsigned int *seq_size);
 106
 107static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
 108{
 109	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
 110	    mode == DRV_HASH_SHA512) {
 111		set_bytes_swap(desc, 1);
 112	} else {
 113		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 114	}
 115}
 116
 117static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
 118			 unsigned int digestsize)
 119{
 120	state->digest_result_dma_addr =
 121		dma_map_single(dev, state->digest_result_buff,
 122			       digestsize, DMA_BIDIRECTIONAL);
 123	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
 124		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
 125			digestsize);
 126		return -ENOMEM;
 127	}
 128	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
 129		digestsize, state->digest_result_buff,
 130		&state->digest_result_dma_addr);
 131
 132	return 0;
 133}
 134
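/*
 * cc_init_req() - reset the per-request hash state.
 *
 * For HMAC (other than XCBC/CMAC) the precomputed IPAD digest, the initial
 * length and the OPAD digest are copied from the tfm context; for a plain
 * hash the larval (initial) digest is copied instead.
 */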
 135static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
 136			struct cc_hash_ctx *ctx)
 137{
 138	bool is_hmac = ctx->is_hmac;
 139
 140	memset(state, 0, sizeof(*state));
 141
 142	if (is_hmac) {
 143		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
 144		    ctx->hw_mode != DRV_CIPHER_CMAC) {
 145			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
 146						ctx->inter_digestsize,
 147						DMA_BIDIRECTIONAL);
 148
 149			memcpy(state->digest_buff, ctx->digest_buff,
 150			       ctx->inter_digestsize);
 151			if (ctx->hash_mode == DRV_HASH_SHA512 ||
 152			    ctx->hash_mode == DRV_HASH_SHA384)
 153				memcpy(state->digest_bytes_len,
 154				       cc_digest_len_sha512_init,
 155				       ctx->hash_len);
 156			else
 157				memcpy(state->digest_bytes_len,
 158				       cc_digest_len_init,
 159				       ctx->hash_len);
 160		}
 161
 162		if (ctx->hash_mode != DRV_HASH_NULL) {
 163			dma_sync_single_for_cpu(dev,
 164						ctx->opad_tmp_keys_dma_addr,
 165						ctx->inter_digestsize,
 166						DMA_BIDIRECTIONAL);
 167			memcpy(state->opad_digest_buff,
 168			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
 169		}
 170	} else { /*hash*/
 171		/* Copy the initial digests if hash flow. */
 172		const void *larval = cc_larval_digest(dev, ctx->hash_mode);
 173
 174		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
 175	}
 176}
 177
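/* Map the per-request digest, length and OPAD digest buffers for DMA. */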
 178static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
 179		      struct cc_hash_ctx *ctx)
 180{
 181	bool is_hmac = ctx->is_hmac;
 182
 183	state->digest_buff_dma_addr =
 184		dma_map_single(dev, state->digest_buff,
 185			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 186	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
 187		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
 188			ctx->inter_digestsize, state->digest_buff);
 189		return -EINVAL;
 190	}
 191	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
 192		ctx->inter_digestsize, state->digest_buff,
 193		&state->digest_buff_dma_addr);
 194
 195	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
 196		state->digest_bytes_len_dma_addr =
 197			dma_map_single(dev, state->digest_bytes_len,
 198				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 199		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
 200			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
 201				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
 202			goto unmap_digest_buf;
 203		}
 204		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
 205			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
 206			&state->digest_bytes_len_dma_addr);
 207	}
 208
 209	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
 210		state->opad_digest_dma_addr =
 211			dma_map_single(dev, state->opad_digest_buff,
 212				       ctx->inter_digestsize,
 213				       DMA_BIDIRECTIONAL);
 214		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
 215			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
 216				ctx->inter_digestsize,
 217				state->opad_digest_buff);
 218			goto unmap_digest_len;
 219		}
 220		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
 221			ctx->inter_digestsize, state->opad_digest_buff,
 222			&state->opad_digest_dma_addr);
 223	}
 224
 225	return 0;
 226
 227unmap_digest_len:
 228	if (state->digest_bytes_len_dma_addr) {
 229		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
 230				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 231		state->digest_bytes_len_dma_addr = 0;
 232	}
 233unmap_digest_buf:
 234	if (state->digest_buff_dma_addr) {
 235		dma_unmap_single(dev, state->digest_buff_dma_addr,
 236				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 237		state->digest_buff_dma_addr = 0;
 238	}
 239
 240	return -EINVAL;
 241}
 242
 243static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
 244			 struct cc_hash_ctx *ctx)
 245{
 246	if (state->digest_buff_dma_addr) {
 247		dma_unmap_single(dev, state->digest_buff_dma_addr,
 248				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 249		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
 250			&state->digest_buff_dma_addr);
 251		state->digest_buff_dma_addr = 0;
 252	}
 253	if (state->digest_bytes_len_dma_addr) {
 254		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
 255				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 256		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
 257			&state->digest_bytes_len_dma_addr);
 258		state->digest_bytes_len_dma_addr = 0;
 259	}
 260	if (state->opad_digest_dma_addr) {
 261		dma_unmap_single(dev, state->opad_digest_dma_addr,
 262				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 263		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
 264			&state->opad_digest_dma_addr);
 265		state->opad_digest_dma_addr = 0;
 266	}
 267}
 268
 269static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
 270			    unsigned int digestsize, u8 *result)
 271{
 272	if (state->digest_result_dma_addr) {
 273		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
 274				 DMA_BIDIRECTIONAL);
  275		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
 276			state->digest_result_buff,
 277			&state->digest_result_dma_addr, digestsize);
 278		memcpy(result, state->digest_result_buff, digestsize);
 279	}
 280	state->digest_result_dma_addr = 0;
 281}
 282
 283static void cc_update_complete(struct device *dev, void *cc_req, int err)
 284{
 285	struct ahash_request *req = (struct ahash_request *)cc_req;
 286	struct ahash_req_ctx *state = ahash_request_ctx(req);
 287	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 288	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 289
 290	dev_dbg(dev, "req=%pK\n", req);
 291
 292	if (err != -EINPROGRESS) {
 293		/* Not a BACKLOG notification */
 294		cc_unmap_hash_request(dev, state, req->src, false);
 295		cc_unmap_req(dev, state, ctx);
 296	}
 297
 298	ahash_request_complete(req, err);
 299}
 300
 301static void cc_digest_complete(struct device *dev, void *cc_req, int err)
 302{
 303	struct ahash_request *req = (struct ahash_request *)cc_req;
 304	struct ahash_req_ctx *state = ahash_request_ctx(req);
 305	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 306	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 307	u32 digestsize = crypto_ahash_digestsize(tfm);
 308
 309	dev_dbg(dev, "req=%pK\n", req);
 310
 311	if (err != -EINPROGRESS) {
 312		/* Not a BACKLOG notification */
 313		cc_unmap_hash_request(dev, state, req->src, false);
 314		cc_unmap_result(dev, state, digestsize, req->result);
 315		cc_unmap_req(dev, state, ctx);
 316	}
 317
 318	ahash_request_complete(req, err);
 319}
 320
 321static void cc_hash_complete(struct device *dev, void *cc_req, int err)
 322{
 323	struct ahash_request *req = (struct ahash_request *)cc_req;
 324	struct ahash_req_ctx *state = ahash_request_ctx(req);
 325	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 326	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 327	u32 digestsize = crypto_ahash_digestsize(tfm);
 328
 329	dev_dbg(dev, "req=%pK\n", req);
 330
 331	if (err != -EINPROGRESS) {
 332		/* Not a BACKLOG notification */
 333		cc_unmap_hash_request(dev, state, req->src, false);
 334		cc_unmap_result(dev, state, digestsize, req->result);
 335		cc_unmap_req(dev, state, ctx);
 336	}
 337
 338	ahash_request_complete(req, err);
 339}
 340
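/*
 * Append the descriptor that writes the final digest from the hash engine
 * into the request's result buffer, using the byte order required by the
 * hash mode.
 */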
 341static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
 342			 int idx)
 343{
 344	struct ahash_req_ctx *state = ahash_request_ctx(req);
 345	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 346	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 347	u32 digestsize = crypto_ahash_digestsize(tfm);
 348
 349	/* Get final MAC result */
 350	hw_desc_init(&desc[idx]);
  351	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 352	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
 353		      NS_BIT, 1);
 354	set_queue_last_ind(ctx->drvdata, &desc[idx]);
 355	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 356	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 357	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 358	cc_set_endianity(ctx->hash_mode, &desc[idx]);
 359	idx++;
 360
 361	return idx;
 362}
 363
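/*
 * Append the outer HMAC pass: save the inner digest, load the OPAD xor
 * state and the initial length from SRAM, then hash the inner digest.
 */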
 364static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
 365		       int idx)
 366{
 367	struct ahash_req_ctx *state = ahash_request_ctx(req);
 368	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 369	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 370	u32 digestsize = crypto_ahash_digestsize(tfm);
 371
 372	/* store the hash digest result in the context */
 373	hw_desc_init(&desc[idx]);
 374	set_cipher_mode(&desc[idx], ctx->hw_mode);
 375	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
 376		      NS_BIT, 0);
 377	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 378	cc_set_endianity(ctx->hash_mode, &desc[idx]);
 379	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 380	idx++;
 381
 382	/* Loading hash opad xor key state */
 383	hw_desc_init(&desc[idx]);
 384	set_cipher_mode(&desc[idx], ctx->hw_mode);
 385	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
 386		     ctx->inter_digestsize, NS_BIT);
 387	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 388	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 389	idx++;
 390
 391	/* Load the hash current length */
 392	hw_desc_init(&desc[idx]);
 393	set_cipher_mode(&desc[idx], ctx->hw_mode);
 394	set_din_sram(&desc[idx],
 395		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
 396		     ctx->hash_len);
 397	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 398	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 399	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 400	idx++;
 401
 402	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
 403	hw_desc_init(&desc[idx]);
 404	set_din_no_dma(&desc[idx], 0, 0xfffff0);
 405	set_dout_no_dma(&desc[idx], 0, 0, 1);
 406	idx++;
 407
 408	/* Perform HASH update */
 409	hw_desc_init(&desc[idx]);
 410	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 411		     digestsize, NS_BIT);
 412	set_flow_mode(&desc[idx], DIN_HASH);
 413	idx++;
 414
 415	return idx;
 416}
 417
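/*
 * Handle a one-shot .digest request: initialize and map the state, then
 * build and queue a descriptor sequence that loads the IPAD/larval digest,
 * hashes the data and writes out the (HMAC) result.
 */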
 418static int cc_hash_digest(struct ahash_request *req)
 419{
 420	struct ahash_req_ctx *state = ahash_request_ctx(req);
 421	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 422	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 423	u32 digestsize = crypto_ahash_digestsize(tfm);
 424	struct scatterlist *src = req->src;
 425	unsigned int nbytes = req->nbytes;
 426	u8 *result = req->result;
 427	struct device *dev = drvdata_to_dev(ctx->drvdata);
 428	bool is_hmac = ctx->is_hmac;
 429	struct cc_crypto_req cc_req = {};
 430	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
  431	u32 larval_digest_addr;
 432	int idx = 0;
 433	int rc = 0;
 434	gfp_t flags = cc_gfp_flags(&req->base);
 435
 436	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
 437		nbytes);
 438
 439	cc_init_req(dev, state, ctx);
 440
 441	if (cc_map_req(dev, state, ctx)) {
 442		dev_err(dev, "map_ahash_source() failed\n");
 443		return -ENOMEM;
 444	}
 445
 446	if (cc_map_result(dev, state, digestsize)) {
 447		dev_err(dev, "map_ahash_digest() failed\n");
 448		cc_unmap_req(dev, state, ctx);
 449		return -ENOMEM;
 450	}
 451
 452	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
 453				      flags)) {
 454		dev_err(dev, "map_ahash_request_final() failed\n");
 455		cc_unmap_result(dev, state, digestsize, result);
 456		cc_unmap_req(dev, state, ctx);
 457		return -ENOMEM;
 458	}
 459
 460	/* Setup request structure */
 461	cc_req.user_cb = cc_digest_complete;
 462	cc_req.user_arg = req;
 463
 464	/* If HMAC then load hash IPAD xor key, if HASH then load initial
 465	 * digest
 466	 */
 467	hw_desc_init(&desc[idx]);
 468	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 469	if (is_hmac) {
 470		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 471			     ctx->inter_digestsize, NS_BIT);
 472	} else {
 473		larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
 474							   ctx->hash_mode);
 475		set_din_sram(&desc[idx], larval_digest_addr,
 476			     ctx->inter_digestsize);
 477	}
 478	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 479	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 480	idx++;
 481
 482	/* Load the hash current length */
 483	hw_desc_init(&desc[idx]);
 484	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 485
 486	if (is_hmac) {
 487		set_din_type(&desc[idx], DMA_DLLI,
 488			     state->digest_bytes_len_dma_addr,
 489			     ctx->hash_len, NS_BIT);
 490	} else {
 491		set_din_const(&desc[idx], 0, ctx->hash_len);
 492		if (nbytes)
 493			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 494		else
 495			set_cipher_do(&desc[idx], DO_PAD);
 496	}
 497	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 498	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 499	idx++;
 500
 501	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
 502
 503	if (is_hmac) {
 504		/* HW last hash block padding (aka. "DO_PAD") */
 505		hw_desc_init(&desc[idx]);
 506		set_cipher_mode(&desc[idx], ctx->hw_mode);
 507		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
 508			      ctx->hash_len, NS_BIT, 0);
 509		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 510		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 511		set_cipher_do(&desc[idx], DO_PAD);
 512		idx++;
 513
 514		idx = cc_fin_hmac(desc, req, idx);
 515	}
 516
 517	idx = cc_fin_result(desc, req, idx);
 518
 519	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 520	if (rc != -EINPROGRESS && rc != -EBUSY) {
 521		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 522		cc_unmap_hash_request(dev, state, src, true);
 523		cc_unmap_result(dev, state, digestsize, result);
 524		cc_unmap_req(dev, state, ctx);
 525	}
 526	return rc;
 527}
 528
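/*
 * Reload the intermediate digest and running length saved by a previous
 * update, then hash the data mapped for this request.
 */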
 529static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
 530			   struct ahash_req_ctx *state, unsigned int idx)
 531{
 532	/* Restore hash digest */
 533	hw_desc_init(&desc[idx]);
 534	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 535	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 536		     ctx->inter_digestsize, NS_BIT);
 537	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 538	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 539	idx++;
 540
 541	/* Restore hash current length */
 542	hw_desc_init(&desc[idx]);
 543	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 544	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 545	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
 546		     ctx->hash_len, NS_BIT);
 547	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 548	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 549	idx++;
 550
 551	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
 552
 553	return idx;
 554}
 555
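/*
 * Handle an .update request: hash the full blocks of the new data and
 * store the updated intermediate digest and length back in the request
 * state. If the data does not amount to at least one full block it is
 * only buffered and no descriptors are sent.
 */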
 556static int cc_hash_update(struct ahash_request *req)
 557{
 558	struct ahash_req_ctx *state = ahash_request_ctx(req);
 559	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 560	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 561	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
 562	struct scatterlist *src = req->src;
 563	unsigned int nbytes = req->nbytes;
 564	struct device *dev = drvdata_to_dev(ctx->drvdata);
 565	struct cc_crypto_req cc_req = {};
 566	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 567	u32 idx = 0;
 568	int rc;
 569	gfp_t flags = cc_gfp_flags(&req->base);
 570
 571	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
 572		"hmac" : "hash", nbytes);
 573
 574	if (nbytes == 0) {
 575		/* no real updates required */
 576		return 0;
 577	}
 578
 579	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
 580					block_size, flags);
 581	if (rc) {
 582		if (rc == 1) {
  583			dev_dbg(dev, "data size does not require HW update %x\n",
 584				nbytes);
 585			/* No hardware updates are required */
 586			return 0;
 587		}
 588		dev_err(dev, "map_ahash_request_update() failed\n");
 589		return -ENOMEM;
 590	}
 591
 592	if (cc_map_req(dev, state, ctx)) {
 593		dev_err(dev, "map_ahash_source() failed\n");
 594		cc_unmap_hash_request(dev, state, src, true);
 595		return -EINVAL;
 596	}
 597
 598	/* Setup request structure */
 599	cc_req.user_cb = cc_update_complete;
 600	cc_req.user_arg = req;
 601
 602	idx = cc_restore_hash(desc, ctx, state, idx);
 603
 604	/* store the hash digest result in context */
 605	hw_desc_init(&desc[idx]);
 606	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 607	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
 608		      ctx->inter_digestsize, NS_BIT, 0);
 609	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 610	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 611	idx++;
 612
 613	/* store current hash length in context */
 614	hw_desc_init(&desc[idx]);
 615	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 616	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
 617		      ctx->hash_len, NS_BIT, 1);
 618	set_queue_last_ind(ctx->drvdata, &desc[idx]);
 619	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 620	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 621	idx++;
 622
 623	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 624	if (rc != -EINPROGRESS && rc != -EBUSY) {
 625		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 626		cc_unmap_hash_request(dev, state, src, true);
 627		cc_unmap_req(dev, state, ctx);
 628	}
 629	return rc;
 630}
 631
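/*
 * Common implementation of .final (update == false) and .finup
 * (update == true): restore the saved intermediate state, apply the hash
 * padding and emit the final (HMAC) digest.
 */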
  632static int cc_do_finup(struct ahash_request *req, bool update)
 633{
 634	struct ahash_req_ctx *state = ahash_request_ctx(req);
 635	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 636	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 637	u32 digestsize = crypto_ahash_digestsize(tfm);
 638	struct scatterlist *src = req->src;
 639	unsigned int nbytes = req->nbytes;
 640	u8 *result = req->result;
 641	struct device *dev = drvdata_to_dev(ctx->drvdata);
 642	bool is_hmac = ctx->is_hmac;
 643	struct cc_crypto_req cc_req = {};
 644	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 645	unsigned int idx = 0;
 646	int rc;
 647	gfp_t flags = cc_gfp_flags(&req->base);
 648
 649	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
 650		update ? "finup" : "final", nbytes);
 651
 652	if (cc_map_req(dev, state, ctx)) {
 653		dev_err(dev, "map_ahash_source() failed\n");
 654		return -EINVAL;
 655	}
 656
 657	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
 658				      flags)) {
 659		dev_err(dev, "map_ahash_request_final() failed\n");
 660		cc_unmap_req(dev, state, ctx);
 661		return -ENOMEM;
  662	}
 663	if (cc_map_result(dev, state, digestsize)) {
 664		dev_err(dev, "map_ahash_digest() failed\n");
 665		cc_unmap_hash_request(dev, state, src, true);
 666		cc_unmap_req(dev, state, ctx);
 667		return -ENOMEM;
 668	}
 669
 670	/* Setup request structure */
 671	cc_req.user_cb = cc_hash_complete;
 672	cc_req.user_arg = req;
 673
 674	idx = cc_restore_hash(desc, ctx, state, idx);
 675
 676	/* Pad the hash */
 677	hw_desc_init(&desc[idx]);
 678	set_cipher_do(&desc[idx], DO_PAD);
 679	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
 680	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
 681		      ctx->hash_len, NS_BIT, 0);
 682	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 683	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 684	idx++;
 685
 686	if (is_hmac)
 687		idx = cc_fin_hmac(desc, req, idx);
 688
 689	idx = cc_fin_result(desc, req, idx);
 690
 691	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 692	if (rc != -EINPROGRESS && rc != -EBUSY) {
 693		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 694		cc_unmap_hash_request(dev, state, src, true);
 695		cc_unmap_result(dev, state, digestsize, result);
 696		cc_unmap_req(dev, state, ctx);
 697	}
 698	return rc;
 699}
 700
 701static int cc_hash_finup(struct ahash_request *req)
 702{
 703	return cc_do_finup(req, true);
 704}
 705
 706
 707static int cc_hash_final(struct ahash_request *req)
 708{
 709	return cc_do_finup(req, false);
 710}
 711
 712static int cc_hash_init(struct ahash_request *req)
 713{
 714	struct ahash_req_ctx *state = ahash_request_ctx(req);
 715	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 716	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 717	struct device *dev = drvdata_to_dev(ctx->drvdata);
 718
 719	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
 720
 721	cc_init_req(dev, state, ctx);
 722
 723	return 0;
 724}
 725
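/*
 * HMAC setkey: a key longer than the block size is first hashed, the key
 * is then zero-padded to a full block, and the IPAD/OPAD xor states are
 * computed synchronously and cached in the tfm context for later requests.
 */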
 726static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
 727			  unsigned int keylen)
 728{
 729	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 730	struct cc_crypto_req cc_req = {};
 731	struct cc_hash_ctx *ctx = NULL;
 732	int blocksize = 0;
 733	int digestsize = 0;
 734	int i, idx = 0, rc = 0;
 735	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 736	u32 larval_addr;
 737	struct device *dev;
 738
 739	ctx = crypto_ahash_ctx(ahash);
 740	dev = drvdata_to_dev(ctx->drvdata);
 741	dev_dbg(dev, "start keylen: %d", keylen);
 742
 743	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
 744	digestsize = crypto_ahash_digestsize(ahash);
 745
 746	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
 747
  748	/* A ZERO keylen selects the plain HASH flow; any NON-ZERO keylen
  749	 * selects the HMAC flow.
  750	 */
 751	ctx->key_params.keylen = keylen;
 752	ctx->key_params.key_dma_addr = 0;
 753	ctx->is_hmac = true;
 754	ctx->key_params.key = NULL;
 755
 756	if (keylen) {
 757		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
 758		if (!ctx->key_params.key)
 759			return -ENOMEM;
 760
 761		ctx->key_params.key_dma_addr =
 762			dma_map_single(dev, ctx->key_params.key, keylen,
 763				       DMA_TO_DEVICE);
 764		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 765			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 766				ctx->key_params.key, keylen);
 767			kfree_sensitive(ctx->key_params.key);
 768			return -ENOMEM;
 769		}
 770		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
 771			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 772
 773		if (keylen > blocksize) {
 774			/* Load hash initial state */
 775			hw_desc_init(&desc[idx]);
 776			set_cipher_mode(&desc[idx], ctx->hw_mode);
 777			set_din_sram(&desc[idx], larval_addr,
 778				     ctx->inter_digestsize);
 779			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 780			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 781			idx++;
 782
 783			/* Load the hash current length*/
 784			hw_desc_init(&desc[idx]);
 785			set_cipher_mode(&desc[idx], ctx->hw_mode);
 786			set_din_const(&desc[idx], 0, ctx->hash_len);
 787			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 788			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 789			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 790			idx++;
 791
 792			hw_desc_init(&desc[idx]);
 793			set_din_type(&desc[idx], DMA_DLLI,
 794				     ctx->key_params.key_dma_addr, keylen,
 795				     NS_BIT);
 796			set_flow_mode(&desc[idx], DIN_HASH);
 797			idx++;
 798
 799			/* Get hashed key */
 800			hw_desc_init(&desc[idx]);
 801			set_cipher_mode(&desc[idx], ctx->hw_mode);
 802			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 803				      digestsize, NS_BIT, 0);
 804			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 805			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 806			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 807			cc_set_endianity(ctx->hash_mode, &desc[idx]);
 808			idx++;
 809
 810			hw_desc_init(&desc[idx]);
 811			set_din_const(&desc[idx], 0, (blocksize - digestsize));
 812			set_flow_mode(&desc[idx], BYPASS);
 813			set_dout_dlli(&desc[idx],
 814				      (ctx->opad_tmp_keys_dma_addr +
 815				       digestsize),
 816				      (blocksize - digestsize), NS_BIT, 0);
 817			idx++;
 818		} else {
 819			hw_desc_init(&desc[idx]);
 820			set_din_type(&desc[idx], DMA_DLLI,
 821				     ctx->key_params.key_dma_addr, keylen,
 822				     NS_BIT);
 823			set_flow_mode(&desc[idx], BYPASS);
 824			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 825				      keylen, NS_BIT, 0);
 826			idx++;
 827
 828			if ((blocksize - keylen)) {
 829				hw_desc_init(&desc[idx]);
 830				set_din_const(&desc[idx], 0,
 831					      (blocksize - keylen));
 832				set_flow_mode(&desc[idx], BYPASS);
 833				set_dout_dlli(&desc[idx],
 834					      (ctx->opad_tmp_keys_dma_addr +
 835					       keylen), (blocksize - keylen),
 836					      NS_BIT, 0);
 837				idx++;
 838			}
 839		}
 840	} else {
 841		hw_desc_init(&desc[idx]);
 842		set_din_const(&desc[idx], 0, blocksize);
 843		set_flow_mode(&desc[idx], BYPASS);
 844		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
 845			      blocksize, NS_BIT, 0);
 846		idx++;
 847	}
 848
 849	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 850	if (rc) {
 851		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 852		goto out;
 853	}
 854
 855	/* calc derived HMAC key */
 856	for (idx = 0, i = 0; i < 2; i++) {
 857		/* Load hash initial state */
 858		hw_desc_init(&desc[idx]);
 859		set_cipher_mode(&desc[idx], ctx->hw_mode);
 860		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
 861		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 862		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 863		idx++;
 864
 865		/* Load the hash current length*/
 866		hw_desc_init(&desc[idx]);
 867		set_cipher_mode(&desc[idx], ctx->hw_mode);
 868		set_din_const(&desc[idx], 0, ctx->hash_len);
 869		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 870		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 871		idx++;
 872
 873		/* Prepare ipad key */
 874		hw_desc_init(&desc[idx]);
 875		set_xor_val(&desc[idx], hmac_pad_const[i]);
 876		set_cipher_mode(&desc[idx], ctx->hw_mode);
 877		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 878		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 879		idx++;
 880
 881		/* Perform HASH update */
 882		hw_desc_init(&desc[idx]);
 883		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
 884			     blocksize, NS_BIT);
 885		set_cipher_mode(&desc[idx], ctx->hw_mode);
 886		set_xor_active(&desc[idx]);
 887		set_flow_mode(&desc[idx], DIN_HASH);
 888		idx++;
 889
 890		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
 891		 * of the first HASH "update" state)
 892		 */
 893		hw_desc_init(&desc[idx]);
 894		set_cipher_mode(&desc[idx], ctx->hw_mode);
 895		if (i > 0) /* Not first iteration */
 896			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 897				      ctx->inter_digestsize, NS_BIT, 0);
 898		else /* First iteration */
 899			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
 900				      ctx->inter_digestsize, NS_BIT, 0);
 901		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 902		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 903		idx++;
 904	}
 905
 906	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 907
  908out:
 909	if (ctx->key_params.key_dma_addr) {
 910		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
 911				 ctx->key_params.keylen, DMA_TO_DEVICE);
 912		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
 913			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 914	}
 915
 916	kfree_sensitive(ctx->key_params.key);
 917
 918	return rc;
 919}
 920
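/*
 * AES-XCBC-MAC setkey: derive the K1, K2 and K3 subkeys by encrypting the
 * constants 0x01..01, 0x02..02 and 0x03..03 with the user key and store
 * them in the opad_tmp_keys buffer.
 */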
 921static int cc_xcbc_setkey(struct crypto_ahash *ahash,
 922			  const u8 *key, unsigned int keylen)
 923{
 924	struct cc_crypto_req cc_req = {};
 925	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 926	struct device *dev = drvdata_to_dev(ctx->drvdata);
 927	int rc = 0;
 928	unsigned int idx = 0;
 929	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 930
 931	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
 932
 933	switch (keylen) {
 934	case AES_KEYSIZE_128:
 935	case AES_KEYSIZE_192:
 936	case AES_KEYSIZE_256:
 937		break;
 938	default:
 939		return -EINVAL;
 940	}
 941
 942	ctx->key_params.keylen = keylen;
 943
 944	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
 945	if (!ctx->key_params.key)
 946		return -ENOMEM;
 947
 948	ctx->key_params.key_dma_addr =
 949		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
 950	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 951		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 952			key, keylen);
 953		kfree_sensitive(ctx->key_params.key);
 954		return -ENOMEM;
 955	}
 956	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
 957		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 958
 959	ctx->is_hmac = true;
 960	/* 1. Load the AES key */
 961	hw_desc_init(&desc[idx]);
 962	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
 963		     keylen, NS_BIT);
 964	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
 965	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
 966	set_key_size_aes(&desc[idx], keylen);
 967	set_flow_mode(&desc[idx], S_DIN_to_AES);
 968	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 969	idx++;
 970
 971	hw_desc_init(&desc[idx]);
 972	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
 973	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 974	set_dout_dlli(&desc[idx],
 975		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
 976		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 977	idx++;
 978
 979	hw_desc_init(&desc[idx]);
 980	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
 981	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 982	set_dout_dlli(&desc[idx],
 983		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
 984		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 985	idx++;
 986
 987	hw_desc_init(&desc[idx]);
 988	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
 989	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 990	set_dout_dlli(&desc[idx],
 991		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
 992		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 993	idx++;
 994
 995	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
  996
 997	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
 998			 ctx->key_params.keylen, DMA_TO_DEVICE);
 999	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1000		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1001
1002	kfree_sensitive(ctx->key_params.key);
1003
1004	return rc;
1005}
1006
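/*
 * AES-CMAC setkey: the key is copied into the opad_tmp_keys buffer
 * (zero-padded for 192-bit keys); no subkey derivation is done here.
 */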
1007static int cc_cmac_setkey(struct crypto_ahash *ahash,
1008			  const u8 *key, unsigned int keylen)
1009{
1010	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1011	struct device *dev = drvdata_to_dev(ctx->drvdata);
1012
1013	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1014
1015	ctx->is_hmac = true;
1016
1017	switch (keylen) {
1018	case AES_KEYSIZE_128:
1019	case AES_KEYSIZE_192:
1020	case AES_KEYSIZE_256:
1021		break;
1022	default:
1023		return -EINVAL;
1024	}
1025
1026	ctx->key_params.keylen = keylen;
1027
1028	/* STAT_PHASE_1: Copy key to ctx */
1029
1030	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1031				keylen, DMA_TO_DEVICE);
1032
1033	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1034	if (keylen == 24) {
1035		memset(ctx->opad_tmp_keys_buff + 24, 0,
1036		       CC_AES_KEY_SIZE_MAX - 24);
1037	}
1038
1039	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1040				   keylen, DMA_TO_DEVICE);
1041
1042	ctx->key_params.keylen = keylen;
1043
1044	return 0;
1045}
1046
1047static void cc_free_ctx(struct cc_hash_ctx *ctx)
1048{
1049	struct device *dev = drvdata_to_dev(ctx->drvdata);
1050
1051	if (ctx->digest_buff_dma_addr) {
1052		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1053				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1054		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1055			&ctx->digest_buff_dma_addr);
1056		ctx->digest_buff_dma_addr = 0;
1057	}
1058	if (ctx->opad_tmp_keys_dma_addr) {
1059		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1060				 sizeof(ctx->opad_tmp_keys_buff),
1061				 DMA_BIDIRECTIONAL);
1062		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1063			&ctx->opad_tmp_keys_dma_addr);
1064		ctx->opad_tmp_keys_dma_addr = 0;
1065	}
1066
1067	ctx->key_params.keylen = 0;
1068}
1069
1070static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
1071{
1072	struct device *dev = drvdata_to_dev(ctx->drvdata);
1073
1074	ctx->key_params.keylen = 0;
1075
1076	ctx->digest_buff_dma_addr =
1077		dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
1078			       DMA_BIDIRECTIONAL);
1079	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1080		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1081			sizeof(ctx->digest_buff), ctx->digest_buff);
1082		goto fail;
1083	}
1084	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1085		sizeof(ctx->digest_buff), ctx->digest_buff,
1086		&ctx->digest_buff_dma_addr);
1087
1088	ctx->opad_tmp_keys_dma_addr =
1089		dma_map_single(dev, ctx->opad_tmp_keys_buff,
1090			       sizeof(ctx->opad_tmp_keys_buff),
1091			       DMA_BIDIRECTIONAL);
1092	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1093		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1094			sizeof(ctx->opad_tmp_keys_buff),
1095			ctx->opad_tmp_keys_buff);
1096		goto fail;
1097	}
1098	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1099		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1100		&ctx->opad_tmp_keys_dma_addr);
1101
1102	ctx->is_hmac = false;
1103	return 0;
1104
1105fail:
1106	cc_free_ctx(ctx);
1107	return -ENOMEM;
1108}
1109
1110static int cc_get_hash_len(struct crypto_tfm *tfm)
1111{
1112	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1113
1114	if (ctx->hash_mode == DRV_HASH_SM3)
1115		return CC_SM3_HASH_LEN_SIZE;
1116	else
1117		return cc_get_default_hash_len(ctx->drvdata);
1118}
1119
1120static int cc_cra_init(struct crypto_tfm *tfm)
1121{
1122	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1123	struct hash_alg_common *hash_alg_common =
1124		container_of(tfm->__crt_alg, struct hash_alg_common, base);
1125	struct ahash_alg *ahash_alg =
1126		container_of(hash_alg_common, struct ahash_alg, halg);
1127	struct cc_hash_alg *cc_alg =
1128			container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
1129
1130	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1131				 sizeof(struct ahash_req_ctx));
1132
1133	ctx->hash_mode = cc_alg->hash_mode;
1134	ctx->hw_mode = cc_alg->hw_mode;
1135	ctx->inter_digestsize = cc_alg->inter_digestsize;
1136	ctx->drvdata = cc_alg->drvdata;
1137	ctx->hash_len = cc_get_hash_len(tfm);
1138	return cc_alloc_ctx(ctx);
1139}
1140
1141static void cc_cra_exit(struct crypto_tfm *tfm)
1142{
1143	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1144	struct device *dev = drvdata_to_dev(ctx->drvdata);
1145
1146	dev_dbg(dev, "cc_cra_exit");
1147	cc_free_ctx(ctx);
1148}
1149
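/*
 * XCBC/CMAC .update: feed the full AES blocks through the AES flow and
 * write the updated block state back into the request context.
 */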
1150static int cc_mac_update(struct ahash_request *req)
1151{
1152	struct ahash_req_ctx *state = ahash_request_ctx(req);
1153	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1154	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1155	struct device *dev = drvdata_to_dev(ctx->drvdata);
1156	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1157	struct cc_crypto_req cc_req = {};
1158	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1159	int rc;
1160	u32 idx = 0;
1161	gfp_t flags = cc_gfp_flags(&req->base);
1162
1163	if (req->nbytes == 0) {
1164		/* no real updates required */
1165		return 0;
1166	}
1167
1168	state->xcbc_count++;
1169
1170	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1171					req->nbytes, block_size, flags);
1172	if (rc) {
1173		if (rc == 1) {
 1174			dev_dbg(dev, "data size does not require HW update %x\n",
1175				req->nbytes);
1176			/* No hardware updates are required */
1177			return 0;
1178		}
1179		dev_err(dev, "map_ahash_request_update() failed\n");
1180		return -ENOMEM;
1181	}
1182
1183	if (cc_map_req(dev, state, ctx)) {
1184		dev_err(dev, "map_ahash_source() failed\n");
1185		return -EINVAL;
1186	}
1187
1188	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1189		cc_setup_xcbc(req, desc, &idx);
1190	else
1191		cc_setup_cmac(req, desc, &idx);
1192
1193	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1194
1195	/* store the hash digest result in context */
1196	hw_desc_init(&desc[idx]);
1197	set_cipher_mode(&desc[idx], ctx->hw_mode);
1198	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1199		      ctx->inter_digestsize, NS_BIT, 1);
1200	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1201	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1202	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1203	idx++;
1204
1205	/* Setup request structure */
1206	cc_req.user_cb = cc_update_complete;
1207	cc_req.user_arg = req;
1208
1209	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1210	if (rc != -EINPROGRESS && rc != -EBUSY) {
1211		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1212		cc_unmap_hash_request(dev, state, req->src, true);
1213		cc_unmap_req(dev, state, ctx);
1214	}
1215	return rc;
1216}
1217
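/*
 * XCBC/CMAC .final: when data was processed and ended exactly on a block
 * boundary, the stored block state is first ECB-decrypted (with the key at
 * XCBC_MAC_K1_OFFSET) to recover block_state XOR M[n]; the buffered
 * remainder, or a size-zero MAC, is then run to produce the final result.
 */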
1218static int cc_mac_final(struct ahash_request *req)
1219{
1220	struct ahash_req_ctx *state = ahash_request_ctx(req);
1221	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1222	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1223	struct device *dev = drvdata_to_dev(ctx->drvdata);
1224	struct cc_crypto_req cc_req = {};
1225	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1226	int idx = 0;
1227	int rc = 0;
1228	u32 key_size, key_len;
1229	u32 digestsize = crypto_ahash_digestsize(tfm);
1230	gfp_t flags = cc_gfp_flags(&req->base);
1231	u32 rem_cnt = *cc_hash_buf_cnt(state);
1232
1233	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1234		key_size = CC_AES_128_BIT_KEY_SIZE;
1235		key_len  = CC_AES_128_BIT_KEY_SIZE;
1236	} else {
1237		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1238			ctx->key_params.keylen;
1239		key_len =  ctx->key_params.keylen;
1240	}
1241
 1242	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
1243
1244	if (cc_map_req(dev, state, ctx)) {
1245		dev_err(dev, "map_ahash_source() failed\n");
1246		return -EINVAL;
1247	}
1248
1249	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1250				      req->nbytes, 0, flags)) {
1251		dev_err(dev, "map_ahash_request_final() failed\n");
1252		cc_unmap_req(dev, state, ctx);
1253		return -ENOMEM;
1254	}
1255
1256	if (cc_map_result(dev, state, digestsize)) {
1257		dev_err(dev, "map_ahash_digest() failed\n");
1258		cc_unmap_hash_request(dev, state, req->src, true);
1259		cc_unmap_req(dev, state, ctx);
1260		return -ENOMEM;
1261	}
1262
1263	/* Setup request structure */
1264	cc_req.user_cb = cc_hash_complete;
1265	cc_req.user_arg = req;
1266
1267	if (state->xcbc_count && rem_cnt == 0) {
1268		/* Load key for ECB decryption */
1269		hw_desc_init(&desc[idx]);
1270		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1271		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1272		set_din_type(&desc[idx], DMA_DLLI,
1273			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
1274			     key_size, NS_BIT);
1275		set_key_size_aes(&desc[idx], key_len);
1276		set_flow_mode(&desc[idx], S_DIN_to_AES);
1277		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1278		idx++;
1279
1280		/* Initiate decryption of block state to previous
1281		 * block_state-XOR-M[n]
1282		 */
1283		hw_desc_init(&desc[idx]);
1284		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1285			     CC_AES_BLOCK_SIZE, NS_BIT);
1286		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1287			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
1288		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1289		idx++;
1290
1291		/* Memory Barrier: wait for axi write to complete */
1292		hw_desc_init(&desc[idx]);
1293		set_din_no_dma(&desc[idx], 0, 0xfffff0);
1294		set_dout_no_dma(&desc[idx], 0, 0, 1);
1295		idx++;
1296	}
1297
1298	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1299		cc_setup_xcbc(req, desc, &idx);
1300	else
1301		cc_setup_cmac(req, desc, &idx);
1302
1303	if (state->xcbc_count == 0) {
1304		hw_desc_init(&desc[idx]);
1305		set_cipher_mode(&desc[idx], ctx->hw_mode);
1306		set_key_size_aes(&desc[idx], key_len);
1307		set_cmac_size0_mode(&desc[idx]);
1308		set_flow_mode(&desc[idx], S_DIN_to_AES);
1309		idx++;
1310	} else if (rem_cnt > 0) {
1311		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1312	} else {
1313		hw_desc_init(&desc[idx]);
1314		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1315		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1316		idx++;
1317	}
1318
1319	/* Get final MAC result */
 1320	hw_desc_init(&desc[idx]);
1321	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1322		      digestsize, NS_BIT, 1);
1323	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1324	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1325	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1326	set_cipher_mode(&desc[idx], ctx->hw_mode);
1327	idx++;
1328
1329	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1330	if (rc != -EINPROGRESS && rc != -EBUSY) {
1331		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1332		cc_unmap_hash_request(dev, state, req->src, true);
1333		cc_unmap_result(dev, state, digestsize, req->result);
1334		cc_unmap_req(dev, state, ctx);
1335	}
1336	return rc;
1337}
1338
1339static int cc_mac_finup(struct ahash_request *req)
1340{
1341	struct ahash_req_ctx *state = ahash_request_ctx(req);
1342	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1343	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1344	struct device *dev = drvdata_to_dev(ctx->drvdata);
1345	struct cc_crypto_req cc_req = {};
1346	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1347	int idx = 0;
1348	int rc = 0;
1349	u32 key_len = 0;
1350	u32 digestsize = crypto_ahash_digestsize(tfm);
1351	gfp_t flags = cc_gfp_flags(&req->base);
1352
1353	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1354	if (state->xcbc_count > 0 && req->nbytes == 0) {
 1355		dev_dbg(dev, "No data to update. Call to cc_mac_final\n");
1356		return cc_mac_final(req);
1357	}
1358
1359	if (cc_map_req(dev, state, ctx)) {
1360		dev_err(dev, "map_ahash_source() failed\n");
1361		return -EINVAL;
1362	}
1363
1364	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1365				      req->nbytes, 1, flags)) {
1366		dev_err(dev, "map_ahash_request_final() failed\n");
1367		cc_unmap_req(dev, state, ctx);
1368		return -ENOMEM;
1369	}
1370	if (cc_map_result(dev, state, digestsize)) {
1371		dev_err(dev, "map_ahash_digest() failed\n");
1372		cc_unmap_hash_request(dev, state, req->src, true);
1373		cc_unmap_req(dev, state, ctx);
1374		return -ENOMEM;
1375	}
1376
1377	/* Setup request structure */
1378	cc_req.user_cb = cc_hash_complete;
1379	cc_req.user_arg = req;
1380
1381	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1382		key_len = CC_AES_128_BIT_KEY_SIZE;
1383		cc_setup_xcbc(req, desc, &idx);
1384	} else {
1385		key_len = ctx->key_params.keylen;
1386		cc_setup_cmac(req, desc, &idx);
1387	}
1388
1389	if (req->nbytes == 0) {
1390		hw_desc_init(&desc[idx]);
1391		set_cipher_mode(&desc[idx], ctx->hw_mode);
1392		set_key_size_aes(&desc[idx], key_len);
1393		set_cmac_size0_mode(&desc[idx]);
1394		set_flow_mode(&desc[idx], S_DIN_to_AES);
1395		idx++;
1396	} else {
1397		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1398	}
1399
1400	/* Get final MAC result */
 1401	hw_desc_init(&desc[idx]);
1402	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1403		      digestsize, NS_BIT, 1);
1404	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1405	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1406	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1407	set_cipher_mode(&desc[idx], ctx->hw_mode);
1408	idx++;
1409
1410	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1411	if (rc != -EINPROGRESS && rc != -EBUSY) {
1412		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1413		cc_unmap_hash_request(dev, state, req->src, true);
1414		cc_unmap_result(dev, state, digestsize, req->result);
1415		cc_unmap_req(dev, state, ctx);
1416	}
1417	return rc;
1418}
1419
1420static int cc_mac_digest(struct ahash_request *req)
1421{
1422	struct ahash_req_ctx *state = ahash_request_ctx(req);
1423	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1424	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1425	struct device *dev = drvdata_to_dev(ctx->drvdata);
1426	u32 digestsize = crypto_ahash_digestsize(tfm);
1427	struct cc_crypto_req cc_req = {};
1428	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1429	u32 key_len;
1430	unsigned int idx = 0;
1431	int rc;
1432	gfp_t flags = cc_gfp_flags(&req->base);
1433
1434	dev_dbg(dev, "===== -digest mac (%d) ====\n",  req->nbytes);
1435
1436	cc_init_req(dev, state, ctx);
1437
1438	if (cc_map_req(dev, state, ctx)) {
1439		dev_err(dev, "map_ahash_source() failed\n");
1440		return -ENOMEM;
1441	}
1442	if (cc_map_result(dev, state, digestsize)) {
1443		dev_err(dev, "map_ahash_digest() failed\n");
1444		cc_unmap_req(dev, state, ctx);
1445		return -ENOMEM;
1446	}
1447
1448	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1449				      req->nbytes, 1, flags)) {
1450		dev_err(dev, "map_ahash_request_final() failed\n");
1451		cc_unmap_req(dev, state, ctx);
1452		return -ENOMEM;
1453	}
1454
1455	/* Setup request structure */
1456	cc_req.user_cb = cc_digest_complete;
1457	cc_req.user_arg = req;
1458
1459	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1460		key_len = CC_AES_128_BIT_KEY_SIZE;
1461		cc_setup_xcbc(req, desc, &idx);
1462	} else {
1463		key_len = ctx->key_params.keylen;
1464		cc_setup_cmac(req, desc, &idx);
1465	}
1466
1467	if (req->nbytes == 0) {
1468		hw_desc_init(&desc[idx]);
1469		set_cipher_mode(&desc[idx], ctx->hw_mode);
1470		set_key_size_aes(&desc[idx], key_len);
1471		set_cmac_size0_mode(&desc[idx]);
1472		set_flow_mode(&desc[idx], S_DIN_to_AES);
1473		idx++;
1474	} else {
1475		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1476	}
1477
1478	/* Get final MAC result */
1479	hw_desc_init(&desc[idx]);
1480	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1481		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
1482	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1483	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1484	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1485	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1486	set_cipher_mode(&desc[idx], ctx->hw_mode);
1487	idx++;
1488
1489	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1490	if (rc != -EINPROGRESS && rc != -EBUSY) {
1491		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1492		cc_unmap_hash_request(dev, state, req->src, true);
1493		cc_unmap_result(dev, state, digestsize, req->result);
1494		cc_unmap_req(dev, state, ctx);
1495	}
1496	return rc;
1497}
1498
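/*
 * Export/import of the partial hash state (magic word, intermediate
 * digest, running length and buffered bytes), so an operation can be
 * suspended and resumed on another request.
 */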
1499static int cc_hash_export(struct ahash_request *req, void *out)
1500{
1501	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1502	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1503	struct ahash_req_ctx *state = ahash_request_ctx(req);
1504	u8 *curr_buff = cc_hash_buf(state);
1505	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1506	const u32 tmp = CC_EXPORT_MAGIC;
1507
1508	memcpy(out, &tmp, sizeof(u32));
1509	out += sizeof(u32);
1510
1511	memcpy(out, state->digest_buff, ctx->inter_digestsize);
1512	out += ctx->inter_digestsize;
1513
1514	memcpy(out, state->digest_bytes_len, ctx->hash_len);
1515	out += ctx->hash_len;
1516
1517	memcpy(out, &curr_buff_cnt, sizeof(u32));
1518	out += sizeof(u32);
1519
1520	memcpy(out, curr_buff, curr_buff_cnt);
1521
1522	return 0;
1523}
1524
1525static int cc_hash_import(struct ahash_request *req, const void *in)
1526{
1527	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1528	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1529	struct device *dev = drvdata_to_dev(ctx->drvdata);
1530	struct ahash_req_ctx *state = ahash_request_ctx(req);
1531	u32 tmp;
1532
1533	memcpy(&tmp, in, sizeof(u32));
1534	if (tmp != CC_EXPORT_MAGIC)
1535		return -EINVAL;
1536	in += sizeof(u32);
1537
1538	cc_init_req(dev, state, ctx);
1539
1540	memcpy(state->digest_buff, in, ctx->inter_digestsize);
1541	in += ctx->inter_digestsize;
1542
1543	memcpy(state->digest_bytes_len, in, ctx->hash_len);
1544	in += ctx->hash_len;
1545
1546	/* Sanity check the data as much as possible */
1547	memcpy(&tmp, in, sizeof(u32));
1548	if (tmp > CC_MAX_HASH_BLCK_SIZE)
1549		return -EINVAL;
1550	in += sizeof(u32);
1551
1552	state->buf_cnt[0] = tmp;
1553	memcpy(state->buffers[0], in, tmp);
1554
1555	return 0;
1556}
1557
1558struct cc_hash_template {
1559	char name[CRYPTO_MAX_ALG_NAME];
1560	char driver_name[CRYPTO_MAX_ALG_NAME];
1561	char mac_name[CRYPTO_MAX_ALG_NAME];
1562	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1563	unsigned int blocksize;
1564	bool is_mac;
1565	bool synchronize;
1566	struct ahash_alg template_ahash;
1567	int hash_mode;
1568	int hw_mode;
1569	int inter_digestsize;
1570	struct cc_drvdata *drvdata;
1571	u32 min_hw_rev;
1572	enum cc_std_body std_body;
1573};
1574
1575#define CC_STATE_SIZE(_x) \
1576	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1577
1578/* hash descriptors */
1579static struct cc_hash_template driver_hash[] = {
 1580	/* Asynchronous hash templates */
1581	{
1582		.name = "sha1",
1583		.driver_name = "sha1-ccree",
1584		.mac_name = "hmac(sha1)",
1585		.mac_driver_name = "hmac-sha1-ccree",
1586		.blocksize = SHA1_BLOCK_SIZE,
1587		.is_mac = true,
1588		.synchronize = false,
1589		.template_ahash = {
1590			.init = cc_hash_init,
1591			.update = cc_hash_update,
1592			.final = cc_hash_final,
1593			.finup = cc_hash_finup,
1594			.digest = cc_hash_digest,
1595			.export = cc_hash_export,
1596			.import = cc_hash_import,
1597			.setkey = cc_hash_setkey,
1598			.halg = {
1599				.digestsize = SHA1_DIGEST_SIZE,
1600				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1601			},
1602		},
1603		.hash_mode = DRV_HASH_SHA1,
1604		.hw_mode = DRV_HASH_HW_SHA1,
1605		.inter_digestsize = SHA1_DIGEST_SIZE,
1606		.min_hw_rev = CC_HW_REV_630,
1607		.std_body = CC_STD_NIST,
1608	},
1609	{
1610		.name = "sha256",
1611		.driver_name = "sha256-ccree",
1612		.mac_name = "hmac(sha256)",
1613		.mac_driver_name = "hmac-sha256-ccree",
1614		.blocksize = SHA256_BLOCK_SIZE,
1615		.is_mac = true,
1616		.template_ahash = {
1617			.init = cc_hash_init,
1618			.update = cc_hash_update,
1619			.final = cc_hash_final,
1620			.finup = cc_hash_finup,
1621			.digest = cc_hash_digest,
1622			.export = cc_hash_export,
1623			.import = cc_hash_import,
1624			.setkey = cc_hash_setkey,
1625			.halg = {
1626				.digestsize = SHA256_DIGEST_SIZE,
1627				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1628			},
1629		},
1630		.hash_mode = DRV_HASH_SHA256,
1631		.hw_mode = DRV_HASH_HW_SHA256,
1632		.inter_digestsize = SHA256_DIGEST_SIZE,
1633		.min_hw_rev = CC_HW_REV_630,
1634		.std_body = CC_STD_NIST,
1635	},
1636	{
1637		.name = "sha224",
1638		.driver_name = "sha224-ccree",
1639		.mac_name = "hmac(sha224)",
1640		.mac_driver_name = "hmac-sha224-ccree",
1641		.blocksize = SHA224_BLOCK_SIZE,
1642		.is_mac = true,
1643		.template_ahash = {
1644			.init = cc_hash_init,
1645			.update = cc_hash_update,
1646			.final = cc_hash_final,
1647			.finup = cc_hash_finup,
1648			.digest = cc_hash_digest,
1649			.export = cc_hash_export,
1650			.import = cc_hash_import,
1651			.setkey = cc_hash_setkey,
1652			.halg = {
1653				.digestsize = SHA224_DIGEST_SIZE,
1654				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
1655			},
1656		},
1657		.hash_mode = DRV_HASH_SHA224,
1658		.hw_mode = DRV_HASH_HW_SHA256,
1659		.inter_digestsize = SHA256_DIGEST_SIZE,
1660		.min_hw_rev = CC_HW_REV_630,
1661		.std_body = CC_STD_NIST,
1662	},
1663	{
1664		.name = "sha384",
1665		.driver_name = "sha384-ccree",
1666		.mac_name = "hmac(sha384)",
1667		.mac_driver_name = "hmac-sha384-ccree",
1668		.blocksize = SHA384_BLOCK_SIZE,
1669		.is_mac = true,
1670		.template_ahash = {
1671			.init = cc_hash_init,
1672			.update = cc_hash_update,
1673			.final = cc_hash_final,
1674			.finup = cc_hash_finup,
1675			.digest = cc_hash_digest,
1676			.export = cc_hash_export,
1677			.import = cc_hash_import,
1678			.setkey = cc_hash_setkey,
1679			.halg = {
1680				.digestsize = SHA384_DIGEST_SIZE,
1681				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1682			},
1683		},
1684		.hash_mode = DRV_HASH_SHA384,
1685		.hw_mode = DRV_HASH_HW_SHA512,
1686		.inter_digestsize = SHA512_DIGEST_SIZE,
1687		.min_hw_rev = CC_HW_REV_712,
1688		.std_body = CC_STD_NIST,
1689	},
1690	{
1691		.name = "sha512",
1692		.driver_name = "sha512-ccree",
1693		.mac_name = "hmac(sha512)",
1694		.mac_driver_name = "hmac-sha512-ccree",
1695		.blocksize = SHA512_BLOCK_SIZE,
1696		.is_mac = true,
1697		.template_ahash = {
1698			.init = cc_hash_init,
1699			.update = cc_hash_update,
1700			.final = cc_hash_final,
1701			.finup = cc_hash_finup,
1702			.digest = cc_hash_digest,
1703			.export = cc_hash_export,
1704			.import = cc_hash_import,
1705			.setkey = cc_hash_setkey,
1706			.halg = {
1707				.digestsize = SHA512_DIGEST_SIZE,
1708				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1709			},
1710		},
1711		.hash_mode = DRV_HASH_SHA512,
1712		.hw_mode = DRV_HASH_HW_SHA512,
1713		.inter_digestsize = SHA512_DIGEST_SIZE,
1714		.min_hw_rev = CC_HW_REV_712,
1715		.std_body = CC_STD_NIST,
1716	},
1717	{
1718		.name = "md5",
1719		.driver_name = "md5-ccree",
1720		.mac_name = "hmac(md5)",
1721		.mac_driver_name = "hmac-md5-ccree",
1722		.blocksize = MD5_HMAC_BLOCK_SIZE,
1723		.is_mac = true,
1724		.template_ahash = {
1725			.init = cc_hash_init,
1726			.update = cc_hash_update,
1727			.final = cc_hash_final,
1728			.finup = cc_hash_finup,
1729			.digest = cc_hash_digest,
1730			.export = cc_hash_export,
1731			.import = cc_hash_import,
1732			.setkey = cc_hash_setkey,
1733			.halg = {
1734				.digestsize = MD5_DIGEST_SIZE,
1735				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1736			},
1737		},
1738		.hash_mode = DRV_HASH_MD5,
1739		.hw_mode = DRV_HASH_HW_MD5,
1740		.inter_digestsize = MD5_DIGEST_SIZE,
1741		.min_hw_rev = CC_HW_REV_630,
1742		.std_body = CC_STD_NIST,
1743	},
1744	{
1745		.name = "sm3",
1746		.driver_name = "sm3-ccree",
1747		.blocksize = SM3_BLOCK_SIZE,
1748		.is_mac = false,
1749		.template_ahash = {
1750			.init = cc_hash_init,
1751			.update = cc_hash_update,
1752			.final = cc_hash_final,
1753			.finup = cc_hash_finup,
1754			.digest = cc_hash_digest,
1755			.export = cc_hash_export,
1756			.import = cc_hash_import,
1757			.setkey = cc_hash_setkey,
1758			.halg = {
1759				.digestsize = SM3_DIGEST_SIZE,
1760				.statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
1761			},
1762		},
1763		.hash_mode = DRV_HASH_SM3,
1764		.hw_mode = DRV_HASH_HW_SM3,
1765		.inter_digestsize = SM3_DIGEST_SIZE,
1766		.min_hw_rev = CC_HW_REV_713,
1767		.std_body = CC_STD_OSCCA,
1768	},
1769	{
1770		.mac_name = "xcbc(aes)",
1771		.mac_driver_name = "xcbc-aes-ccree",
1772		.blocksize = AES_BLOCK_SIZE,
1773		.is_mac = true,
1774		.template_ahash = {
1775			.init = cc_hash_init,
1776			.update = cc_mac_update,
1777			.final = cc_mac_final,
1778			.finup = cc_mac_finup,
1779			.digest = cc_mac_digest,
1780			.setkey = cc_xcbc_setkey,
1781			.export = cc_hash_export,
1782			.import = cc_hash_import,
1783			.halg = {
1784				.digestsize = AES_BLOCK_SIZE,
1785				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1786			},
1787		},
1788		.hash_mode = DRV_HASH_NULL,
1789		.hw_mode = DRV_CIPHER_XCBC_MAC,
1790		.inter_digestsize = AES_BLOCK_SIZE,
1791		.min_hw_rev = CC_HW_REV_630,
1792		.std_body = CC_STD_NIST,
1793	},
1794	{
1795		.mac_name = "cmac(aes)",
1796		.mac_driver_name = "cmac-aes-ccree",
1797		.blocksize = AES_BLOCK_SIZE,
1798		.is_mac = true,
1799		.template_ahash = {
1800			.init = cc_hash_init,
1801			.update = cc_mac_update,
1802			.final = cc_mac_final,
1803			.finup = cc_mac_finup,
1804			.digest = cc_mac_digest,
1805			.setkey = cc_cmac_setkey,
1806			.export = cc_hash_export,
1807			.import = cc_hash_import,
1808			.halg = {
1809				.digestsize = AES_BLOCK_SIZE,
1810				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1811			},
1812		},
1813		.hash_mode = DRV_HASH_NULL,
1814		.hw_mode = DRV_CIPHER_CMAC,
1815		.inter_digestsize = AES_BLOCK_SIZE,
1816		.min_hw_rev = CC_HW_REV_630,
1817		.std_body = CC_STD_NIST,
1818	},
1819};
1820
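/*
 * Allocate and fill a cc_hash_alg from the given template. When @keyed is
 * true the (h)mac names are used and setkey is kept; otherwise the plain
 * hash names are used and setkey is removed.
 */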
1821static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1822					     struct device *dev, bool keyed)
1823{
1824	struct cc_hash_alg *t_crypto_alg;
1825	struct crypto_alg *alg;
1826	struct ahash_alg *halg;
1827
1828	t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
1829	if (!t_crypto_alg)
1830		return ERR_PTR(-ENOMEM);
1831
1832	t_crypto_alg->ahash_alg = template->template_ahash;
1833	halg = &t_crypto_alg->ahash_alg;
1834	alg = &halg->halg.base;
1835
1836	if (keyed) {
1837		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1838			 template->mac_name);
1839		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1840			 template->mac_driver_name);
1841	} else {
1842		halg->setkey = NULL;
1843		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1844			 template->name);
1845		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1846			 template->driver_name);
1847	}
1848	alg->cra_module = THIS_MODULE;
1849	alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1850	alg->cra_priority = CC_CRA_PRIO;
1851	alg->cra_blocksize = template->blocksize;
1852	alg->cra_alignmask = 0;
1853	alg->cra_exit = cc_cra_exit;
1854
1855	alg->cra_init = cc_cra_init;
1856	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
1857
1858	t_crypto_alg->hash_mode = template->hash_mode;
1859	t_crypto_alg->hw_mode = template->hw_mode;
1860	t_crypto_alg->inter_digestsize = template->inter_digestsize;
1861
1862	return t_crypto_alg;
1863}
1864
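/* Copy a constant array into SRAM at *sram_buff_ofs and advance the offset */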
1865static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
1866			     unsigned int size, u32 *sram_buff_ofs)
1867{
1868	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1869	unsigned int larval_seq_len = 0;
1870	int rc;
1871
1872	cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
1873			 larval_seq, &larval_seq_len);
1874	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1875	if (rc)
1876		return rc;
1877
1878	*sram_buff_ofs += size;
1879	return 0;
1880}
1881
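/*
 * Load the digest-length constants and the larval (initial) digests into
 * SRAM. The copy order must match the offsets computed by
 * cc_larval_digest_addr() and cc_digest_len_addr().
 */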
1882int cc_init_hash_sram(struct cc_drvdata *drvdata)
1883{
1884	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1885	u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
1886	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1887	bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
1888	int rc = 0;
1889
1890	/* Copy-to-sram digest-len */
1891	rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
1892			       sizeof(cc_digest_len_init), &sram_buff_ofs);
1893	if (rc)
1894		goto init_digest_const_err;
1895
1896	if (large_sha_supported) {
1897		/* Copy-to-sram digest-len for sha384/512 */
1898		rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
1899				       sizeof(cc_digest_len_sha512_init),
1900				       &sram_buff_ofs);
1901		if (rc)
1902			goto init_digest_const_err;
1903	}
1904
1905	/* The initial digests offset */
1906	hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1907
1908	/* Copy-to-sram initial SHA* digests */
1909	rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
1910			       &sram_buff_ofs);
1911	if (rc)
1912		goto init_digest_const_err;
1913
1914	rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
1915			       &sram_buff_ofs);
1916	if (rc)
1917		goto init_digest_const_err;
1918
1919	rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
1920			       &sram_buff_ofs);
1921	if (rc)
1922		goto init_digest_const_err;
1923
1924	rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
1925			       &sram_buff_ofs);
1926	if (rc)
1927		goto init_digest_const_err;
1928
1929	if (sm3_supported) {
1930		rc = cc_init_copy_sram(drvdata, cc_sm3_init,
1931				       sizeof(cc_sm3_init), &sram_buff_ofs);
1932		if (rc)
1933			goto init_digest_const_err;
1934	}
1935
1936	if (large_sha_supported) {
1937		rc = cc_init_copy_sram(drvdata, cc_sha384_init,
1938				       sizeof(cc_sha384_init), &sram_buff_ofs);
1939		if (rc)
1940			goto init_digest_const_err;
1941
1942		rc = cc_init_copy_sram(drvdata, cc_sha512_init,
1943				       sizeof(cc_sha512_init), &sram_buff_ofs);
1944		if (rc)
1945			goto init_digest_const_err;
1946	}
1947
1948init_digest_const_err:
1949	return rc;
1950}
1951
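/*
 * Allocate SRAM for the hash constants, load them, and register the hash and
 * HMAC/MAC algorithms supported by this HW revision and standards body.
 */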
1952int cc_hash_alloc(struct cc_drvdata *drvdata)
1953{
1954	struct cc_hash_handle *hash_handle;
1955	u32 sram_buff;
1956	u32 sram_size_to_alloc;
1957	struct device *dev = drvdata_to_dev(drvdata);
1958	int rc = 0;
1959	int alg;
1960
1961	hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
1962	if (!hash_handle)
1963		return -ENOMEM;
1964
1965	INIT_LIST_HEAD(&hash_handle->hash_list);
1966	drvdata->hash_handle = hash_handle;
1967
1968	sram_size_to_alloc = sizeof(cc_digest_len_init) +
1969			sizeof(cc_md5_init) +
1970			sizeof(cc_sha1_init) +
1971			sizeof(cc_sha224_init) +
1972			sizeof(cc_sha256_init);
1973
1974	if (drvdata->hw_rev >= CC_HW_REV_713)
1975		sram_size_to_alloc += sizeof(cc_sm3_init);
1976
1977	if (drvdata->hw_rev >= CC_HW_REV_712)
1978		sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
1979			sizeof(cc_sha384_init) + sizeof(cc_sha512_init);
1980
1981	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
1982	if (sram_buff == NULL_SRAM_ADDR) {
1983		rc = -ENOMEM;
1984		goto fail;
1985	}
1986
1987	/* The initial digest-len offset */
1988	hash_handle->digest_len_sram_addr = sram_buff;
1989
1990	/* Must be set before the alg registration, as it is used there */
1991	rc = cc_init_hash_sram(drvdata);
1992	if (rc) {
1993		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
1994		goto fail;
1995	}
1996
1997	/* ahash registration */
1998	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
1999		struct cc_hash_alg *t_alg;
2000		int hw_mode = driver_hash[alg].hw_mode;
2001
2002		/* Check that the HW revision and variants are suitable */
2003		if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
2004		    !(drvdata->std_bodies & driver_hash[alg].std_body))
2005			continue;
2006
2007		if (driver_hash[alg].is_mac) {
2008			/* register hmac version */
2009			t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
2010			if (IS_ERR(t_alg)) {
2011				rc = PTR_ERR(t_alg);
2012				dev_err(dev, "%s alg allocation failed\n",
2013					driver_hash[alg].driver_name);
2014				goto fail;
2015			}
2016			t_alg->drvdata = drvdata;
2017
2018			rc = crypto_register_ahash(&t_alg->ahash_alg);
2019			if (rc) {
2020				dev_err(dev, "%s alg registration failed\n",
2021					driver_hash[alg].driver_name);
2022				goto fail;
2023			}
2024
2025			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2026		}
2027		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2028		    hw_mode == DRV_CIPHER_CMAC)
2029			continue;
2030
2031		/* register hash version */
2032		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
2033		if (IS_ERR(t_alg)) {
2034			rc = PTR_ERR(t_alg);
2035			dev_err(dev, "%s alg allocation failed\n",
2036				driver_hash[alg].driver_name);
2037			goto fail;
2038		}
2039		t_alg->drvdata = drvdata;
2040
2041		rc = crypto_register_ahash(&t_alg->ahash_alg);
2042		if (rc) {
2043			dev_err(dev, "%s alg registration failed\n",
2044				driver_hash[alg].driver_name);
2045			goto fail;
2046		}
2047
2048		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2049	}
2050
2051	return 0;
2052
2053fail:
2054	cc_hash_free(drvdata);
2055	return rc;
2056}
2057
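/* Unregister and remove all previously registered hash algorithms */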
2058int cc_hash_free(struct cc_drvdata *drvdata)
2059{
2060	struct cc_hash_alg *t_hash_alg, *hash_n;
2061	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2062
2063	list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
2064				 entry) {
2065		crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2066		list_del(&t_hash_alg->entry);
2067	}
2068
2069	return 0;
2070}
2071
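/*
 * Build the descriptor sequence that loads the derived XCBC K1/K2/K3 keys and
 * the current MAC state into the AES engine.
 */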
2072static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2073			  unsigned int *seq_size)
2074{
2075	unsigned int idx = *seq_size;
2076	struct ahash_req_ctx *state = ahash_request_ctx(areq);
2077	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2078	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2079
2080	/* Setup XCBC MAC K1 */
2081	hw_desc_init(&desc[idx]);
2082	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2083					    XCBC_MAC_K1_OFFSET),
2084		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2085	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2086	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
2087	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2088	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2089	set_flow_mode(&desc[idx], S_DIN_to_AES);
2090	idx++;
2091
2092	/* Setup XCBC MAC K2 */
2093	hw_desc_init(&desc[idx]);
2094	set_din_type(&desc[idx], DMA_DLLI,
2095		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2096		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2097	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2098	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2099	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2100	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2101	set_flow_mode(&desc[idx], S_DIN_to_AES);
2102	idx++;
2103
2104	/* Setup XCBC MAC K3 */
2105	hw_desc_init(&desc[idx]);
2106	set_din_type(&desc[idx], DMA_DLLI,
2107		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2108		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2109	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2110	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2111	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2112	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2113	set_flow_mode(&desc[idx], S_DIN_to_AES);
2114	idx++;
2115
2116	/* Loading MAC state */
2117	hw_desc_init(&desc[idx]);
2118	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2119		     CC_AES_BLOCK_SIZE, NS_BIT);
2120	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2121	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2122	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2123	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2124	set_flow_mode(&desc[idx], S_DIN_to_AES);
2125	idx++;
2126	*seq_size = idx;
2127}
2128
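/* Build the descriptor sequence that loads the CMAC key and current MAC state */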
2129static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2130			  unsigned int *seq_size)
2131{
2132	unsigned int idx = *seq_size;
2133	struct ahash_req_ctx *state = ahash_request_ctx(areq);
2134	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2135	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2136
2137	/* Setup CMAC Key */
2138	hw_desc_init(&desc[idx]);
2139	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2140		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2141		      ctx->key_params.keylen), NS_BIT);
2142	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2143	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2144	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2145	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2146	set_flow_mode(&desc[idx], S_DIN_to_AES);
2147	idx++;
2148
2149	/* Load MAC state */
2150	hw_desc_init(&desc[idx]);
2151	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2152		     CC_AES_BLOCK_SIZE, NS_BIT);
2153	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2154	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2155	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2156	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2157	set_flow_mode(&desc[idx], S_DIN_to_AES);
2158	idx++;
2159	*seq_size = idx;
2160}
2161
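/*
 * Add the data processing descriptors: a single DLLI descriptor for
 * contiguous data, or a BYPASS copy of the MLLI table into SRAM followed by
 * an MLLI-driven descriptor for scattered data. Nothing is added for a NULL
 * data buffer.
 */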
2162static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2163			struct cc_hash_ctx *ctx, unsigned int flow_mode,
2164			struct cc_hw_desc desc[], bool is_not_last_data,
2165			unsigned int *seq_size)
2166{
2167	unsigned int idx = *seq_size;
2168	struct device *dev = drvdata_to_dev(ctx->drvdata);
2169
2170	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2171		hw_desc_init(&desc[idx]);
2172		set_din_type(&desc[idx], DMA_DLLI,
2173			     sg_dma_address(areq_ctx->curr_sg),
2174			     areq_ctx->curr_sg->length, NS_BIT);
2175		set_flow_mode(&desc[idx], flow_mode);
2176		idx++;
2177	} else {
2178		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2179			dev_dbg(dev, " NULL mode\n");
2180			/* nothing to build */
2181			return;
2182		}
2183		/* bypass */
2184		hw_desc_init(&desc[idx]);
2185		set_din_type(&desc[idx], DMA_DLLI,
2186			     areq_ctx->mlli_params.mlli_dma_addr,
2187			     areq_ctx->mlli_params.mlli_len, NS_BIT);
2188		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2189			      areq_ctx->mlli_params.mlli_len);
2190		set_flow_mode(&desc[idx], BYPASS);
2191		idx++;
2192		/* process */
2193		hw_desc_init(&desc[idx]);
2194		set_din_type(&desc[idx], DMA_MLLI,
2195			     ctx->drvdata->mlli_sram_addr,
2196			     areq_ctx->mlli_nents, NS_BIT);
2197		set_flow_mode(&desc[idx], flow_mode);
2198		idx++;
2199	}
2200	if (is_not_last_data)
2201		set_din_not_last_indication(&desc[(idx - 1)]);
2202	/* return updated desc sequence size */
2203	*seq_size = idx;
2204}
2205
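/* Return the larval (initial) digest constant matching the given hash mode */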
2206static const void *cc_larval_digest(struct device *dev, u32 mode)
2207{
2208	switch (mode) {
2209	case DRV_HASH_MD5:
2210		return cc_md5_init;
2211	case DRV_HASH_SHA1:
2212		return cc_sha1_init;
2213	case DRV_HASH_SHA224:
2214		return cc_sha224_init;
2215	case DRV_HASH_SHA256:
2216		return cc_sha256_init;
2217	case DRV_HASH_SHA384:
2218		return cc_sha384_init;
2219	case DRV_HASH_SHA512:
2220		return cc_sha512_init;
2221	case DRV_HASH_SM3:
2222		return cc_sm3_init;
2223	default:
2224		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2225		return cc_md5_init;
2226	}
2227}
2228
2229/**
2230 * cc_larval_digest_addr() - Get the address of the initial digest in SRAM
2231 * according to the given hash mode
2232 *
2233 * @drvdata: Associated device driver context
2234 * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512/SM3
2235 *
2236 * Return:
2237 * The address of the initial digest in SRAM
2238 */
2239u32 cc_larval_digest_addr(void *drvdata, u32 mode)
2240{
2241	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2242	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2243	struct device *dev = drvdata_to_dev(_drvdata);
2244	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
2245	u32 addr;
2246
2247	switch (mode) {
2248	case DRV_HASH_NULL:
2249		break; /*Ignore*/
2250	case DRV_HASH_MD5:
2251		return (hash_handle->larval_digest_sram_addr);
2252	case DRV_HASH_SHA1:
2253		return (hash_handle->larval_digest_sram_addr +
2254			sizeof(cc_md5_init));
2255	case DRV_HASH_SHA224:
2256		return (hash_handle->larval_digest_sram_addr +
2257			sizeof(cc_md5_init) +
2258			sizeof(cc_sha1_init));
2259	case DRV_HASH_SHA256:
2260		return (hash_handle->larval_digest_sram_addr +
2261			sizeof(cc_md5_init) +
2262			sizeof(cc_sha1_init) +
2263			sizeof(cc_sha224_init));
2264	case DRV_HASH_SM3:
2265		return (hash_handle->larval_digest_sram_addr +
2266			sizeof(cc_md5_init) +
2267			sizeof(cc_sha1_init) +
2268			sizeof(cc_sha224_init) +
2269			sizeof(cc_sha256_init));
2270	case DRV_HASH_SHA384:
2271		addr = (hash_handle->larval_digest_sram_addr +
2272			sizeof(cc_md5_init) +
2273			sizeof(cc_sha1_init) +
2274			sizeof(cc_sha224_init) +
2275			sizeof(cc_sha256_init));
2276		if (sm3_supported)
2277			addr += sizeof(cc_sm3_init);
2278		return addr;
2279	case DRV_HASH_SHA512:
2280		addr = (hash_handle->larval_digest_sram_addr +
2281			sizeof(cc_md5_init) +
2282			sizeof(cc_sha1_init) +
2283			sizeof(cc_sha224_init) +
2284			sizeof(cc_sha256_init) +
2285			sizeof(cc_sha384_init));
2286		if (sm3_supported)
2287			addr += sizeof(cc_sm3_init);
2288		return addr;
2289	default:
2290		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2291	}
2292
2293	/* Return a valid, though wrong, address to avoid a kernel crash */
2294	return hash_handle->larval_digest_sram_addr;
2295}
2296
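/*
 * Get the SRAM address of the initial digest-length constant for the given
 * hash mode; SHA384/SHA512 use a separate constant.
 */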
2297u32 cc_digest_len_addr(void *drvdata, u32 mode)
2298{
2299	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2300	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2301	u32 digest_len_addr = hash_handle->digest_len_sram_addr;
2302
2303	switch (mode) {
2304	case DRV_HASH_SHA1:
2305	case DRV_HASH_SHA224:
2306	case DRV_HASH_SHA256:
2307	case DRV_HASH_MD5:
2308		return digest_len_addr;
2309	case DRV_HASH_SHA384:
2310	case DRV_HASH_SHA512:
2311		return  digest_len_addr + sizeof(cc_digest_len_init);
2312	default:
2313		return digest_len_addr; /*to avoid kernel crash*/
2314	}
2315}
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
   3
   4#include <linux/kernel.h>
   5#include <linux/module.h>
   6#include <crypto/algapi.h>
   7#include <crypto/hash.h>
   8#include <crypto/md5.h>
 
   9#include <crypto/internal/hash.h>
  10
  11#include "cc_driver.h"
  12#include "cc_request_mgr.h"
  13#include "cc_buffer_mgr.h"
  14#include "cc_hash.h"
  15#include "cc_sram_mgr.h"
  16
  17#define CC_MAX_HASH_SEQ_LEN 12
  18#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
 
  19
  20struct cc_hash_handle {
  21	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
  22	cc_sram_addr_t larval_digest_sram_addr;   /* const value in SRAM */
  23	struct list_head hash_list;
  24};
  25
  26static const u32 digest_len_init[] = {
  27	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
  28static const u32 md5_init[] = {
  29	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
  30static const u32 sha1_init[] = {
  31	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
  32static const u32 sha224_init[] = {
  33	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
  34	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
  35static const u32 sha256_init[] = {
  36	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
  37	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
  38static const u32 digest_len_sha512_init[] = {
  39	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
  40static u64 sha384_init[] = {
  41	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
  42	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
  43static u64 sha512_init[] = {
  44	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
  45	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
 
 
 
 
 
 
 
 
 
 
  46
  47static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
  48			  unsigned int *seq_size);
  49
  50static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
  51			  unsigned int *seq_size);
  52
  53static const void *cc_larval_digest(struct device *dev, u32 mode);
  54
  55struct cc_hash_alg {
  56	struct list_head entry;
  57	int hash_mode;
  58	int hw_mode;
  59	int inter_digestsize;
  60	struct cc_drvdata *drvdata;
  61	struct ahash_alg ahash_alg;
  62};
  63
  64struct hash_key_req_ctx {
  65	u32 keylen;
  66	dma_addr_t key_dma_addr;
 
  67};
  68
  69/* hash per-session context */
  70struct cc_hash_ctx {
  71	struct cc_drvdata *drvdata;
  72	/* holds the origin digest; the digest after "setkey" if HMAC,*
  73	 * the initial digest if HASH.
  74	 */
  75	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE]  ____cacheline_aligned;
  76	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE]  ____cacheline_aligned;
  77
  78	dma_addr_t opad_tmp_keys_dma_addr  ____cacheline_aligned;
  79	dma_addr_t digest_buff_dma_addr;
  80	/* use for hmac with key large then mode block size */
  81	struct hash_key_req_ctx key_params;
  82	int hash_mode;
  83	int hw_mode;
  84	int inter_digestsize;
 
  85	struct completion setkey_comp;
  86	bool is_hmac;
  87};
  88
  89static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
  90			unsigned int flow_mode, struct cc_hw_desc desc[],
  91			bool is_not_last_data, unsigned int *seq_size);
  92
  93static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
  94{
  95	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
  96	    mode == DRV_HASH_SHA512) {
  97		set_bytes_swap(desc, 1);
  98	} else {
  99		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
 100	}
 101}
 102
 103static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
 104			 unsigned int digestsize)
 105{
 106	state->digest_result_dma_addr =
 107		dma_map_single(dev, state->digest_result_buff,
 108			       digestsize, DMA_BIDIRECTIONAL);
 109	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
 110		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
 111			digestsize);
 112		return -ENOMEM;
 113	}
 114	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
 115		digestsize, state->digest_result_buff,
 116		&state->digest_result_dma_addr);
 117
 118	return 0;
 119}
 120
 121static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
 122			struct cc_hash_ctx *ctx)
 123{
 124	bool is_hmac = ctx->is_hmac;
 125
 126	memset(state, 0, sizeof(*state));
 127
 128	if (is_hmac) {
 129		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
 130		    ctx->hw_mode != DRV_CIPHER_CMAC) {
 131			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
 132						ctx->inter_digestsize,
 133						DMA_BIDIRECTIONAL);
 134
 135			memcpy(state->digest_buff, ctx->digest_buff,
 136			       ctx->inter_digestsize);
 137			if (ctx->hash_mode == DRV_HASH_SHA512 ||
 138			    ctx->hash_mode == DRV_HASH_SHA384)
 139				memcpy(state->digest_bytes_len,
 140				       digest_len_sha512_init,
 141				       ctx->drvdata->hash_len_sz);
 142			else
 143				memcpy(state->digest_bytes_len, digest_len_init,
 144				       ctx->drvdata->hash_len_sz);
 
 145		}
 146
 147		if (ctx->hash_mode != DRV_HASH_NULL) {
 148			dma_sync_single_for_cpu(dev,
 149						ctx->opad_tmp_keys_dma_addr,
 150						ctx->inter_digestsize,
 151						DMA_BIDIRECTIONAL);
 152			memcpy(state->opad_digest_buff,
 153			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
 154		}
 155	} else { /*hash*/
 156		/* Copy the initial digests if hash flow. */
 157		const void *larval = cc_larval_digest(dev, ctx->hash_mode);
 158
 159		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
 160	}
 161}
 162
 163static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
 164		      struct cc_hash_ctx *ctx)
 165{
 166	bool is_hmac = ctx->is_hmac;
 167
 168	state->digest_buff_dma_addr =
 169		dma_map_single(dev, state->digest_buff,
 170			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 171	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
 172		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
 173			ctx->inter_digestsize, state->digest_buff);
 174		return -EINVAL;
 175	}
 176	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
 177		ctx->inter_digestsize, state->digest_buff,
 178		&state->digest_buff_dma_addr);
 179
 180	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
 181		state->digest_bytes_len_dma_addr =
 182			dma_map_single(dev, state->digest_bytes_len,
 183				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 184		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
 185			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
 186				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
 187			goto unmap_digest_buf;
 188		}
 189		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
 190			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
 191			&state->digest_bytes_len_dma_addr);
 192	}
 193
 194	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
 195		state->opad_digest_dma_addr =
 196			dma_map_single(dev, state->opad_digest_buff,
 197				       ctx->inter_digestsize,
 198				       DMA_BIDIRECTIONAL);
 199		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
 200			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
 201				ctx->inter_digestsize,
 202				state->opad_digest_buff);
 203			goto unmap_digest_len;
 204		}
 205		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
 206			ctx->inter_digestsize, state->opad_digest_buff,
 207			&state->opad_digest_dma_addr);
 208	}
 209
 210	return 0;
 211
 212unmap_digest_len:
 213	if (state->digest_bytes_len_dma_addr) {
 214		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
 215				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 216		state->digest_bytes_len_dma_addr = 0;
 217	}
 218unmap_digest_buf:
 219	if (state->digest_buff_dma_addr) {
 220		dma_unmap_single(dev, state->digest_buff_dma_addr,
 221				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 222		state->digest_buff_dma_addr = 0;
 223	}
 224
 225	return -EINVAL;
 226}
 227
 228static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
 229			 struct cc_hash_ctx *ctx)
 230{
 231	if (state->digest_buff_dma_addr) {
 232		dma_unmap_single(dev, state->digest_buff_dma_addr,
 233				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 234		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
 235			&state->digest_buff_dma_addr);
 236		state->digest_buff_dma_addr = 0;
 237	}
 238	if (state->digest_bytes_len_dma_addr) {
 239		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
 240				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
 241		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
 242			&state->digest_bytes_len_dma_addr);
 243		state->digest_bytes_len_dma_addr = 0;
 244	}
 245	if (state->opad_digest_dma_addr) {
 246		dma_unmap_single(dev, state->opad_digest_dma_addr,
 247				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
 248		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
 249			&state->opad_digest_dma_addr);
 250		state->opad_digest_dma_addr = 0;
 251	}
 252}
 253
 254static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
 255			    unsigned int digestsize, u8 *result)
 256{
 257	if (state->digest_result_dma_addr) {
 258		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
 259				 DMA_BIDIRECTIONAL);
 260		dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
 261			state->digest_result_buff,
 262			&state->digest_result_dma_addr, digestsize);
 263		memcpy(result, state->digest_result_buff, digestsize);
 264	}
 265	state->digest_result_dma_addr = 0;
 266}
 267
 268static void cc_update_complete(struct device *dev, void *cc_req, int err)
 269{
 270	struct ahash_request *req = (struct ahash_request *)cc_req;
 271	struct ahash_req_ctx *state = ahash_request_ctx(req);
 272	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 273	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 274
 275	dev_dbg(dev, "req=%pK\n", req);
 276
 277	cc_unmap_hash_request(dev, state, req->src, false);
 278	cc_unmap_req(dev, state, ctx);
 279	req->base.complete(&req->base, err);
 
 
 
 
 280}
 281
 282static void cc_digest_complete(struct device *dev, void *cc_req, int err)
 283{
 284	struct ahash_request *req = (struct ahash_request *)cc_req;
 285	struct ahash_req_ctx *state = ahash_request_ctx(req);
 286	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 287	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 288	u32 digestsize = crypto_ahash_digestsize(tfm);
 289
 290	dev_dbg(dev, "req=%pK\n", req);
 291
 292	cc_unmap_hash_request(dev, state, req->src, false);
 293	cc_unmap_result(dev, state, digestsize, req->result);
 294	cc_unmap_req(dev, state, ctx);
 295	req->base.complete(&req->base, err);
 
 
 
 
 296}
 297
 298static void cc_hash_complete(struct device *dev, void *cc_req, int err)
 299{
 300	struct ahash_request *req = (struct ahash_request *)cc_req;
 301	struct ahash_req_ctx *state = ahash_request_ctx(req);
 302	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 303	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 304	u32 digestsize = crypto_ahash_digestsize(tfm);
 305
 306	dev_dbg(dev, "req=%pK\n", req);
 307
 308	cc_unmap_hash_request(dev, state, req->src, false);
 309	cc_unmap_result(dev, state, digestsize, req->result);
 310	cc_unmap_req(dev, state, ctx);
 311	req->base.complete(&req->base, err);
 
 
 
 
 312}
 313
 314static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
 315			 int idx)
 316{
 317	struct ahash_req_ctx *state = ahash_request_ctx(req);
 318	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 319	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 320	u32 digestsize = crypto_ahash_digestsize(tfm);
 321
 322	/* Get final MAC result */
 323	hw_desc_init(&desc[idx]);
 324	set_cipher_mode(&desc[idx], ctx->hw_mode);
 325	/* TODO */
 326	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
 327		      NS_BIT, 1);
 328	set_queue_last_ind(ctx->drvdata, &desc[idx]);
 329	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 330	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 331	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 332	cc_set_endianity(ctx->hash_mode, &desc[idx]);
 333	idx++;
 334
 335	return idx;
 336}
 337
 338static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
 339		       int idx)
 340{
 341	struct ahash_req_ctx *state = ahash_request_ctx(req);
 342	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 343	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 344	u32 digestsize = crypto_ahash_digestsize(tfm);
 345
 346	/* store the hash digest result in the context */
 347	hw_desc_init(&desc[idx]);
 348	set_cipher_mode(&desc[idx], ctx->hw_mode);
 349	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
 350		      NS_BIT, 0);
 351	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 352	cc_set_endianity(ctx->hash_mode, &desc[idx]);
 353	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 354	idx++;
 355
 356	/* Loading hash opad xor key state */
 357	hw_desc_init(&desc[idx]);
 358	set_cipher_mode(&desc[idx], ctx->hw_mode);
 359	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
 360		     ctx->inter_digestsize, NS_BIT);
 361	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 362	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 363	idx++;
 364
 365	/* Load the hash current length */
 366	hw_desc_init(&desc[idx]);
 367	set_cipher_mode(&desc[idx], ctx->hw_mode);
 368	set_din_sram(&desc[idx],
 369		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
 370		     ctx->drvdata->hash_len_sz);
 371	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 372	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 373	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 374	idx++;
 375
 376	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
 377	hw_desc_init(&desc[idx]);
 378	set_din_no_dma(&desc[idx], 0, 0xfffff0);
 379	set_dout_no_dma(&desc[idx], 0, 0, 1);
 380	idx++;
 381
 382	/* Perform HASH update */
 383	hw_desc_init(&desc[idx]);
 384	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 385		     digestsize, NS_BIT);
 386	set_flow_mode(&desc[idx], DIN_HASH);
 387	idx++;
 388
 389	return idx;
 390}
 391
 392static int cc_hash_digest(struct ahash_request *req)
 393{
 394	struct ahash_req_ctx *state = ahash_request_ctx(req);
 395	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 396	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 397	u32 digestsize = crypto_ahash_digestsize(tfm);
 398	struct scatterlist *src = req->src;
 399	unsigned int nbytes = req->nbytes;
 400	u8 *result = req->result;
 401	struct device *dev = drvdata_to_dev(ctx->drvdata);
 402	bool is_hmac = ctx->is_hmac;
 403	struct cc_crypto_req cc_req = {};
 404	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 405	cc_sram_addr_t larval_digest_addr =
 406		cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
 407	int idx = 0;
 408	int rc = 0;
 409	gfp_t flags = cc_gfp_flags(&req->base);
 410
 411	dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
 412		nbytes);
 413
 414	cc_init_req(dev, state, ctx);
 415
 416	if (cc_map_req(dev, state, ctx)) {
 417		dev_err(dev, "map_ahash_source() failed\n");
 418		return -ENOMEM;
 419	}
 420
 421	if (cc_map_result(dev, state, digestsize)) {
 422		dev_err(dev, "map_ahash_digest() failed\n");
 423		cc_unmap_req(dev, state, ctx);
 424		return -ENOMEM;
 425	}
 426
 427	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
 428				      flags)) {
 429		dev_err(dev, "map_ahash_request_final() failed\n");
 430		cc_unmap_result(dev, state, digestsize, result);
 431		cc_unmap_req(dev, state, ctx);
 432		return -ENOMEM;
 433	}
 434
 435	/* Setup request structure */
 436	cc_req.user_cb = cc_digest_complete;
 437	cc_req.user_arg = req;
 438
 439	/* If HMAC then load hash IPAD xor key, if HASH then load initial
 440	 * digest
 441	 */
 442	hw_desc_init(&desc[idx]);
 443	set_cipher_mode(&desc[idx], ctx->hw_mode);
 444	if (is_hmac) {
 445		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 446			     ctx->inter_digestsize, NS_BIT);
 447	} else {
 
 
 448		set_din_sram(&desc[idx], larval_digest_addr,
 449			     ctx->inter_digestsize);
 450	}
 451	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 452	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 453	idx++;
 454
 455	/* Load the hash current length */
 456	hw_desc_init(&desc[idx]);
 457	set_cipher_mode(&desc[idx], ctx->hw_mode);
 458
 459	if (is_hmac) {
 460		set_din_type(&desc[idx], DMA_DLLI,
 461			     state->digest_bytes_len_dma_addr,
 462			     ctx->drvdata->hash_len_sz, NS_BIT);
 463	} else {
 464		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
 465		if (nbytes)
 466			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 467		else
 468			set_cipher_do(&desc[idx], DO_PAD);
 469	}
 470	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 471	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 472	idx++;
 473
 474	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
 475
 476	if (is_hmac) {
 477		/* HW last hash block padding (aka. "DO_PAD") */
 478		hw_desc_init(&desc[idx]);
 479		set_cipher_mode(&desc[idx], ctx->hw_mode);
 480		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
 481			      ctx->drvdata->hash_len_sz, NS_BIT, 0);
 482		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 483		set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 484		set_cipher_do(&desc[idx], DO_PAD);
 485		idx++;
 486
 487		idx = cc_fin_hmac(desc, req, idx);
 488	}
 489
 490	idx = cc_fin_result(desc, req, idx);
 491
 492	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 493	if (rc != -EINPROGRESS && rc != -EBUSY) {
 494		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 495		cc_unmap_hash_request(dev, state, src, true);
 496		cc_unmap_result(dev, state, digestsize, result);
 497		cc_unmap_req(dev, state, ctx);
 498	}
 499	return rc;
 500}
 501
 502static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
 503			   struct ahash_req_ctx *state, unsigned int idx)
 504{
 505	/* Restore hash digest */
 506	hw_desc_init(&desc[idx]);
 507	set_cipher_mode(&desc[idx], ctx->hw_mode);
 508	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 509		     ctx->inter_digestsize, NS_BIT);
 510	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 511	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 512	idx++;
 513
 514	/* Restore hash current length */
 515	hw_desc_init(&desc[idx]);
 516	set_cipher_mode(&desc[idx], ctx->hw_mode);
 517	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 518	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
 519		     ctx->drvdata->hash_len_sz, NS_BIT);
 520	set_flow_mode(&desc[idx], S_DIN_to_HASH);
 521	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 522	idx++;
 523
 524	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
 525
 526	return idx;
 527}
 528
 529static int cc_hash_update(struct ahash_request *req)
 530{
 531	struct ahash_req_ctx *state = ahash_request_ctx(req);
 532	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 533	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 534	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
 535	struct scatterlist *src = req->src;
 536	unsigned int nbytes = req->nbytes;
 537	struct device *dev = drvdata_to_dev(ctx->drvdata);
 538	struct cc_crypto_req cc_req = {};
 539	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 540	u32 idx = 0;
 541	int rc;
 542	gfp_t flags = cc_gfp_flags(&req->base);
 543
 544	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
 545		"hmac" : "hash", nbytes);
 546
 547	if (nbytes == 0) {
 548		/* no real updates required */
 549		return 0;
 550	}
 551
 552	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
 553					block_size, flags);
 554	if (rc) {
 555		if (rc == 1) {
 556			dev_dbg(dev, " data size not require HW update %x\n",
 557				nbytes);
 558			/* No hardware updates are required */
 559			return 0;
 560		}
 561		dev_err(dev, "map_ahash_request_update() failed\n");
 562		return -ENOMEM;
 563	}
 564
 565	if (cc_map_req(dev, state, ctx)) {
 566		dev_err(dev, "map_ahash_source() failed\n");
 567		cc_unmap_hash_request(dev, state, src, true);
 568		return -EINVAL;
 569	}
 570
 571	/* Setup request structure */
 572	cc_req.user_cb = cc_update_complete;
 573	cc_req.user_arg = req;
 574
 575	idx = cc_restore_hash(desc, ctx, state, idx);
 576
 577	/* store the hash digest result in context */
 578	hw_desc_init(&desc[idx]);
 579	set_cipher_mode(&desc[idx], ctx->hw_mode);
 580	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
 581		      ctx->inter_digestsize, NS_BIT, 0);
 582	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 583	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 584	idx++;
 585
 586	/* store current hash length in context */
 587	hw_desc_init(&desc[idx]);
 588	set_cipher_mode(&desc[idx], ctx->hw_mode);
 589	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
 590		      ctx->drvdata->hash_len_sz, NS_BIT, 1);
 591	set_queue_last_ind(ctx->drvdata, &desc[idx]);
 592	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 593	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 594	idx++;
 595
 596	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 597	if (rc != -EINPROGRESS && rc != -EBUSY) {
 598		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 599		cc_unmap_hash_request(dev, state, src, true);
 600		cc_unmap_req(dev, state, ctx);
 601	}
 602	return rc;
 603}
 604
 605static int cc_hash_finup(struct ahash_request *req)
 606{
 607	struct ahash_req_ctx *state = ahash_request_ctx(req);
 608	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 609	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 610	u32 digestsize = crypto_ahash_digestsize(tfm);
 611	struct scatterlist *src = req->src;
 612	unsigned int nbytes = req->nbytes;
 613	u8 *result = req->result;
 614	struct device *dev = drvdata_to_dev(ctx->drvdata);
 615	bool is_hmac = ctx->is_hmac;
 616	struct cc_crypto_req cc_req = {};
 617	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 618	unsigned int idx = 0;
 619	int rc;
 620	gfp_t flags = cc_gfp_flags(&req->base);
 621
 622	dev_dbg(dev, "===== %s-finup (%d) ====\n", is_hmac ? "hmac" : "hash",
 623		nbytes);
 624
 625	if (cc_map_req(dev, state, ctx)) {
 626		dev_err(dev, "map_ahash_source() failed\n");
 627		return -EINVAL;
 628	}
 629
 630	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
 631				      flags)) {
 632		dev_err(dev, "map_ahash_request_final() failed\n");
 633		cc_unmap_req(dev, state, ctx);
 634		return -ENOMEM;
 635	}
 636	if (cc_map_result(dev, state, digestsize)) {
 637		dev_err(dev, "map_ahash_digest() failed\n");
 638		cc_unmap_hash_request(dev, state, src, true);
 639		cc_unmap_req(dev, state, ctx);
 640		return -ENOMEM;
 641	}
 642
 643	/* Setup request structure */
 644	cc_req.user_cb = cc_hash_complete;
 645	cc_req.user_arg = req;
 646
 647	idx = cc_restore_hash(desc, ctx, state, idx);
 648
 649	if (is_hmac)
 650		idx = cc_fin_hmac(desc, req, idx);
 651
 652	idx = cc_fin_result(desc, req, idx);
 653
 654	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 655	if (rc != -EINPROGRESS && rc != -EBUSY) {
 656		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 657		cc_unmap_hash_request(dev, state, src, true);
 658		cc_unmap_result(dev, state, digestsize, result);
 659		cc_unmap_req(dev, state, ctx);
 660	}
 661	return rc;
 662}
 663
 664static int cc_hash_final(struct ahash_request *req)
 665{
 666	struct ahash_req_ctx *state = ahash_request_ctx(req);
 667	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 668	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 669	u32 digestsize = crypto_ahash_digestsize(tfm);
 670	struct scatterlist *src = req->src;
 671	unsigned int nbytes = req->nbytes;
 672	u8 *result = req->result;
 673	struct device *dev = drvdata_to_dev(ctx->drvdata);
 674	bool is_hmac = ctx->is_hmac;
 675	struct cc_crypto_req cc_req = {};
 676	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 677	unsigned int idx = 0;
 678	int rc;
 679	gfp_t flags = cc_gfp_flags(&req->base);
 680
 681	dev_dbg(dev, "===== %s-final (%d) ====\n", is_hmac ? "hmac" : "hash",
 682		nbytes);
 683
 684	if (cc_map_req(dev, state, ctx)) {
 685		dev_err(dev, "map_ahash_source() failed\n");
 686		return -EINVAL;
 687	}
 688
 689	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 0,
 690				      flags)) {
 691		dev_err(dev, "map_ahash_request_final() failed\n");
 692		cc_unmap_req(dev, state, ctx);
 693		return -ENOMEM;
 694	}
 695
 696	if (cc_map_result(dev, state, digestsize)) {
 697		dev_err(dev, "map_ahash_digest() failed\n");
 698		cc_unmap_hash_request(dev, state, src, true);
 699		cc_unmap_req(dev, state, ctx);
 700		return -ENOMEM;
 701	}
 702
 703	/* Setup request structure */
 704	cc_req.user_cb = cc_hash_complete;
 705	cc_req.user_arg = req;
 706
 707	idx = cc_restore_hash(desc, ctx, state, idx);
 708
 709	/* "DO-PAD" must be enabled only when writing current length to HW */
 710	hw_desc_init(&desc[idx]);
 711	set_cipher_do(&desc[idx], DO_PAD);
 712	set_cipher_mode(&desc[idx], ctx->hw_mode);
 713	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
 714		      ctx->drvdata->hash_len_sz, NS_BIT, 0);
 715	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
 716	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 717	idx++;
 718
 719	if (is_hmac)
 720		idx = cc_fin_hmac(desc, req, idx);
 721
 722	idx = cc_fin_result(desc, req, idx);
 723
 724	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 725	if (rc != -EINPROGRESS && rc != -EBUSY) {
 726		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 727		cc_unmap_hash_request(dev, state, src, true);
 728		cc_unmap_result(dev, state, digestsize, result);
 729		cc_unmap_req(dev, state, ctx);
 730	}
 731	return rc;
 732}
 733
 
 
 
 
 
 
 
 
 
 
 
 734static int cc_hash_init(struct ahash_request *req)
 735{
 736	struct ahash_req_ctx *state = ahash_request_ctx(req);
 737	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 738	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 739	struct device *dev = drvdata_to_dev(ctx->drvdata);
 740
 741	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
 742
 743	cc_init_req(dev, state, ctx);
 744
 745	return 0;
 746}
 747
 748static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
 749			  unsigned int keylen)
 750{
 751	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
 752	struct cc_crypto_req cc_req = {};
 753	struct cc_hash_ctx *ctx = NULL;
 754	int blocksize = 0;
 755	int digestsize = 0;
 756	int i, idx = 0, rc = 0;
 757	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 758	cc_sram_addr_t larval_addr;
 759	struct device *dev;
 760
 761	ctx = crypto_ahash_ctx(ahash);
 762	dev = drvdata_to_dev(ctx->drvdata);
 763	dev_dbg(dev, "start keylen: %d", keylen);
 764
 765	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
 766	digestsize = crypto_ahash_digestsize(ahash);
 767
 768	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
 769
 770	/* The keylen value distinguishes HASH in case keylen is ZERO bytes,
 771	 * any NON-ZERO value utilizes HMAC flow
 772	 */
 773	ctx->key_params.keylen = keylen;
 774	ctx->key_params.key_dma_addr = 0;
 775	ctx->is_hmac = true;
 
 776
 777	if (keylen) {
 
 
 
 
 778		ctx->key_params.key_dma_addr =
 779			dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
 
 780		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 781			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 782				key, keylen);
 
 783			return -ENOMEM;
 784		}
 785		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
 786			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 787
 788		if (keylen > blocksize) {
 789			/* Load hash initial state */
 790			hw_desc_init(&desc[idx]);
 791			set_cipher_mode(&desc[idx], ctx->hw_mode);
 792			set_din_sram(&desc[idx], larval_addr,
 793				     ctx->inter_digestsize);
 794			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 795			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 796			idx++;
 797
 798			/* Load the hash current length*/
 799			hw_desc_init(&desc[idx]);
 800			set_cipher_mode(&desc[idx], ctx->hw_mode);
 801			set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
 802			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
 803			set_flow_mode(&desc[idx], S_DIN_to_HASH);
 804			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 805			idx++;
 806
 807			hw_desc_init(&desc[idx]);
 808			set_din_type(&desc[idx], DMA_DLLI,
 809				     ctx->key_params.key_dma_addr, keylen,
 810				     NS_BIT);
 811			set_flow_mode(&desc[idx], DIN_HASH);
 812			idx++;
 813
 814			/* Get hashed key */
 815			hw_desc_init(&desc[idx]);
 816			set_cipher_mode(&desc[idx], ctx->hw_mode);
 817			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 818				      digestsize, NS_BIT, 0);
 819			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 820			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 821			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
 822			cc_set_endianity(ctx->hash_mode, &desc[idx]);
 823			idx++;
 824
 825			hw_desc_init(&desc[idx]);
 826			set_din_const(&desc[idx], 0, (blocksize - digestsize));
 827			set_flow_mode(&desc[idx], BYPASS);
 828			set_dout_dlli(&desc[idx],
 829				      (ctx->opad_tmp_keys_dma_addr +
 830				       digestsize),
 831				      (blocksize - digestsize), NS_BIT, 0);
 832			idx++;
 833		} else {
 834			hw_desc_init(&desc[idx]);
 835			set_din_type(&desc[idx], DMA_DLLI,
 836				     ctx->key_params.key_dma_addr, keylen,
 837				     NS_BIT);
 838			set_flow_mode(&desc[idx], BYPASS);
 839			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 840				      keylen, NS_BIT, 0);
 841			idx++;
 842
 843			if ((blocksize - keylen)) {
 844				hw_desc_init(&desc[idx]);
 845				set_din_const(&desc[idx], 0,
 846					      (blocksize - keylen));
 847				set_flow_mode(&desc[idx], BYPASS);
 848				set_dout_dlli(&desc[idx],
 849					      (ctx->opad_tmp_keys_dma_addr +
 850					       keylen), (blocksize - keylen),
 851					      NS_BIT, 0);
 852				idx++;
 853			}
 854		}
 855	} else {
 856		hw_desc_init(&desc[idx]);
 857		set_din_const(&desc[idx], 0, blocksize);
 858		set_flow_mode(&desc[idx], BYPASS);
 859		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
 860			      blocksize, NS_BIT, 0);
 861		idx++;
 862	}
 863
 864	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 865	if (rc) {
 866		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
 867		goto out;
 868	}
 869
 870	/* calc derived HMAC key */
 871	for (idx = 0, i = 0; i < 2; i++) {
 872		/* Load hash initial state */
 873		hw_desc_init(&desc[idx]);
 874		set_cipher_mode(&desc[idx], ctx->hw_mode);
 875		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
 876		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 877		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
 878		idx++;
 879
 880		/* Load the hash current length*/
 881		hw_desc_init(&desc[idx]);
 882		set_cipher_mode(&desc[idx], ctx->hw_mode);
 883		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
 884		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 885		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 886		idx++;
 887
 888		/* Prepare ipad key */
 889		hw_desc_init(&desc[idx]);
 890		set_xor_val(&desc[idx], hmac_pad_const[i]);
 891		set_cipher_mode(&desc[idx], ctx->hw_mode);
 892		set_flow_mode(&desc[idx], S_DIN_to_HASH);
 893		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
 894		idx++;
 895
 896		/* Perform HASH update */
 897		hw_desc_init(&desc[idx]);
 898		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
 899			     blocksize, NS_BIT);
 900		set_cipher_mode(&desc[idx], ctx->hw_mode);
 901		set_xor_active(&desc[idx]);
 902		set_flow_mode(&desc[idx], DIN_HASH);
 903		idx++;
 904
 905		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
 906		 * of the first HASH "update" state)
 907		 */
 908		hw_desc_init(&desc[idx]);
 909		set_cipher_mode(&desc[idx], ctx->hw_mode);
 910		if (i > 0) /* Not first iteration */
 911			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
 912				      ctx->inter_digestsize, NS_BIT, 0);
 913		else /* First iteration */
 914			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
 915				      ctx->inter_digestsize, NS_BIT, 0);
 916		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
 917		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
 918		idx++;
 919	}
 920
 921	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
 922
 923out:
 924	if (rc)
 925		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
 926
 927	if (ctx->key_params.key_dma_addr) {
 928		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
 929				 ctx->key_params.keylen, DMA_TO_DEVICE);
 930		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
 931			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 932	}
 
 
 
 933	return rc;
 934}
 935
 936static int cc_xcbc_setkey(struct crypto_ahash *ahash,
 937			  const u8 *key, unsigned int keylen)
 938{
 939	struct cc_crypto_req cc_req = {};
 940	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 941	struct device *dev = drvdata_to_dev(ctx->drvdata);
 942	int rc = 0;
 943	unsigned int idx = 0;
 944	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
 945
 946	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
 947
 948	switch (keylen) {
 949	case AES_KEYSIZE_128:
 950	case AES_KEYSIZE_192:
 951	case AES_KEYSIZE_256:
 952		break;
 953	default:
 954		return -EINVAL;
 955	}
 956
 957	ctx->key_params.keylen = keylen;
 958
 
 
 
 
 959	ctx->key_params.key_dma_addr =
 960		dma_map_single(dev, (void *)key, keylen, DMA_TO_DEVICE);
 961	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 962		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
 963			key, keylen);
 
 964		return -ENOMEM;
 965	}
 966	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
 967		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
 968
 969	ctx->is_hmac = true;
 970	/* 1. Load the AES key */
 971	hw_desc_init(&desc[idx]);
 972	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
 973		     keylen, NS_BIT);
 974	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
 975	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
 976	set_key_size_aes(&desc[idx], keylen);
 977	set_flow_mode(&desc[idx], S_DIN_to_AES);
 978	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
 979	idx++;
 980
 981	hw_desc_init(&desc[idx]);
 982	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
 983	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 984	set_dout_dlli(&desc[idx],
 985		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
 986		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 987	idx++;
 988
 989	hw_desc_init(&desc[idx]);
 990	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
 991	set_flow_mode(&desc[idx], DIN_AES_DOUT);
 992	set_dout_dlli(&desc[idx],
 993		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
 994		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
 995	idx++;
 996
 997	hw_desc_init(&desc[idx]);
 998	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
 999	set_flow_mode(&desc[idx], DIN_AES_DOUT);
1000	set_dout_dlli(&desc[idx],
1001		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
1002		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
1003	idx++;
1004
1005	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
1006
1007	if (rc)
1008		crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
1009
1010	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
1011			 ctx->key_params.keylen, DMA_TO_DEVICE);
1012	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
1013		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
1014
 
 
1015	return rc;
1016}
1017
1018static int cc_cmac_setkey(struct crypto_ahash *ahash,
1019			  const u8 *key, unsigned int keylen)
1020{
1021	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1022	struct device *dev = drvdata_to_dev(ctx->drvdata);
1023
1024	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
1025
1026	ctx->is_hmac = true;
1027
1028	switch (keylen) {
1029	case AES_KEYSIZE_128:
1030	case AES_KEYSIZE_192:
1031	case AES_KEYSIZE_256:
1032		break;
1033	default:
1034		return -EINVAL;
1035	}
1036
1037	ctx->key_params.keylen = keylen;
1038
1039	/* STAT_PHASE_1: Copy key to ctx */
1040
1041	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1042				keylen, DMA_TO_DEVICE);
1043
1044	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1045	if (keylen == 24) {
1046		memset(ctx->opad_tmp_keys_buff + 24, 0,
1047		       CC_AES_KEY_SIZE_MAX - 24);
1048	}
1049
1050	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1051				   keylen, DMA_TO_DEVICE);
1052
1053	ctx->key_params.keylen = keylen;
1054
1055	return 0;
1056}
1057
1058static void cc_free_ctx(struct cc_hash_ctx *ctx)
1059{
1060	struct device *dev = drvdata_to_dev(ctx->drvdata);
1061
1062	if (ctx->digest_buff_dma_addr) {
1063		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1064				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1065		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1066			&ctx->digest_buff_dma_addr);
1067		ctx->digest_buff_dma_addr = 0;
1068	}
1069	if (ctx->opad_tmp_keys_dma_addr) {
1070		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1071				 sizeof(ctx->opad_tmp_keys_buff),
1072				 DMA_BIDIRECTIONAL);
1073		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1074			&ctx->opad_tmp_keys_dma_addr);
1075		ctx->opad_tmp_keys_dma_addr = 0;
1076	}
1077
1078	ctx->key_params.keylen = 0;
1079}
1080
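/*
 * The two per-tfm buffers (intermediate digest and opad/temp keys) are
 * DMA-mapped once at transform init time, reused by every request on the
 * tfm, and only unmapped again by cc_free_ctx() when the transform goes
 * away.
 */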
1081static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
1082{
1083	struct device *dev = drvdata_to_dev(ctx->drvdata);
1084
1085	ctx->key_params.keylen = 0;
1086
1087	ctx->digest_buff_dma_addr =
1088		dma_map_single(dev, (void *)ctx->digest_buff,
1089			       sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1090	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1091		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1092			sizeof(ctx->digest_buff), ctx->digest_buff);
1093		goto fail;
1094	}
1095	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1096		sizeof(ctx->digest_buff), ctx->digest_buff,
1097		&ctx->digest_buff_dma_addr);
1098
1099	ctx->opad_tmp_keys_dma_addr =
1100		dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
1101			       sizeof(ctx->opad_tmp_keys_buff),
1102			       DMA_BIDIRECTIONAL);
1103	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1104		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1105			sizeof(ctx->opad_tmp_keys_buff),
1106			ctx->opad_tmp_keys_buff);
1107		goto fail;
1108	}
1109	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1110		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1111		&ctx->opad_tmp_keys_dma_addr);
1112
1113	ctx->is_hmac = false;
1114	return 0;
1115
1116fail:
1117	cc_free_ctx(ctx);
1118	return -ENOMEM;
1119}
1120
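/*
 * tfm init/exit hooks: recover the enclosing cc_hash_alg from the
 * registered ahash_alg via container_of(), copy its per-algorithm
 * parameters into the tfm context, set the request context size and map
 * the context buffers.
 */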
1121static int cc_cra_init(struct crypto_tfm *tfm)
1122{
1123	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1124	struct hash_alg_common *hash_alg_common =
1125		container_of(tfm->__crt_alg, struct hash_alg_common, base);
1126	struct ahash_alg *ahash_alg =
1127		container_of(hash_alg_common, struct ahash_alg, halg);
1128	struct cc_hash_alg *cc_alg =
1129			container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
1130
1131	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1132				 sizeof(struct ahash_req_ctx));
1133
1134	ctx->hash_mode = cc_alg->hash_mode;
1135	ctx->hw_mode = cc_alg->hw_mode;
1136	ctx->inter_digestsize = cc_alg->inter_digestsize;
1137	ctx->drvdata = cc_alg->drvdata;
1138
1139	return cc_alloc_ctx(ctx);
1140}
1141
1142static void cc_cra_exit(struct crypto_tfm *tfm)
1143{
1144	struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1145	struct device *dev = drvdata_to_dev(ctx->drvdata);
1146
 1147	dev_dbg(dev, "cc_cra_exit\n");
1148	cc_free_ctx(ctx);
1149}
1150
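/*
 * MAC (XCBC/CMAC) update: partial blocks are buffered by
 * cc_map_hash_request_update(); a return value of 1 means everything was
 * buffered and no HW pass is needed. Otherwise the key/state is loaded,
 * the mapped data is pushed through the AES flow and the intermediate MAC
 * state is written back into the request context.
 */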
1151static int cc_mac_update(struct ahash_request *req)
1152{
1153	struct ahash_req_ctx *state = ahash_request_ctx(req);
1154	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1155	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1156	struct device *dev = drvdata_to_dev(ctx->drvdata);
1157	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1158	struct cc_crypto_req cc_req = {};
1159	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1160	int rc;
1161	u32 idx = 0;
1162	gfp_t flags = cc_gfp_flags(&req->base);
1163
1164	if (req->nbytes == 0) {
1165		/* no real updates required */
1166		return 0;
1167	}
1168
1169	state->xcbc_count++;
1170
1171	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1172					req->nbytes, block_size, flags);
1173	if (rc) {
1174		if (rc == 1) {
 1175			dev_dbg(dev, "data size does not require HW update %x\n",
 1176				req->nbytes);
1177			/* No hardware updates are required */
1178			return 0;
1179		}
1180		dev_err(dev, "map_ahash_request_update() failed\n");
1181		return -ENOMEM;
1182	}
1183
1184	if (cc_map_req(dev, state, ctx)) {
1185		dev_err(dev, "map_ahash_source() failed\n");
1186		return -EINVAL;
1187	}
1188
1189	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1190		cc_setup_xcbc(req, desc, &idx);
1191	else
1192		cc_setup_cmac(req, desc, &idx);
1193
1194	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1195
1196	/* store the hash digest result in context */
1197	hw_desc_init(&desc[idx]);
1198	set_cipher_mode(&desc[idx], ctx->hw_mode);
1199	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1200		      ctx->inter_digestsize, NS_BIT, 1);
1201	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1202	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1203	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1204	idx++;
1205
1206	/* Setup request structure */
1207	cc_req.user_cb = (void *)cc_update_complete;
1208	cc_req.user_arg = (void *)req;
1209
1210	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1211	if (rc != -EINPROGRESS && rc != -EBUSY) {
1212		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1213		cc_unmap_hash_request(dev, state, req->src, true);
1214		cc_unmap_req(dev, state, ctx);
1215	}
1216	return rc;
1217}
1218
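/*
 * MAC final handles three cases. xcbc_count == 0 means no data was ever
 * processed, so a special "size 0" descriptor produces the MAC of the
 * empty message. A non-zero buffer remainder is simply fed in as the last
 * block. If earlier updates consumed a block-aligned message (rem_cnt == 0
 * with xcbc_count != 0), the last block has already gone through the
 * engine, so the intermediate state is first ECB-decrypted back to
 * prev_state XOR M[n] and an all-zero block is fed in, apparently so the
 * engine can redo that block with the proper final-block treatment.
 */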
1219static int cc_mac_final(struct ahash_request *req)
1220{
1221	struct ahash_req_ctx *state = ahash_request_ctx(req);
1222	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1223	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1224	struct device *dev = drvdata_to_dev(ctx->drvdata);
1225	struct cc_crypto_req cc_req = {};
1226	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1227	int idx = 0;
1228	int rc = 0;
1229	u32 key_size, key_len;
1230	u32 digestsize = crypto_ahash_digestsize(tfm);
1231	gfp_t flags = cc_gfp_flags(&req->base);
1232	u32 rem_cnt = *cc_hash_buf_cnt(state);
1233
1234	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1235		key_size = CC_AES_128_BIT_KEY_SIZE;
1236		key_len  = CC_AES_128_BIT_KEY_SIZE;
1237	} else {
1238		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1239			ctx->key_params.keylen;
1240		key_len =  ctx->key_params.keylen;
1241	}
1242
 1243	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);
1244
1245	if (cc_map_req(dev, state, ctx)) {
1246		dev_err(dev, "map_ahash_source() failed\n");
1247		return -EINVAL;
1248	}
1249
1250	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1251				      req->nbytes, 0, flags)) {
1252		dev_err(dev, "map_ahash_request_final() failed\n");
1253		cc_unmap_req(dev, state, ctx);
1254		return -ENOMEM;
1255	}
1256
1257	if (cc_map_result(dev, state, digestsize)) {
1258		dev_err(dev, "map_ahash_digest() failed\n");
1259		cc_unmap_hash_request(dev, state, req->src, true);
1260		cc_unmap_req(dev, state, ctx);
1261		return -ENOMEM;
1262	}
1263
1264	/* Setup request structure */
1265	cc_req.user_cb = (void *)cc_hash_complete;
1266	cc_req.user_arg = (void *)req;
1267
1268	if (state->xcbc_count && rem_cnt == 0) {
1269		/* Load key for ECB decryption */
1270		hw_desc_init(&desc[idx]);
1271		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1272		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1273		set_din_type(&desc[idx], DMA_DLLI,
1274			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
1275			     key_size, NS_BIT);
1276		set_key_size_aes(&desc[idx], key_len);
1277		set_flow_mode(&desc[idx], S_DIN_to_AES);
1278		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1279		idx++;
1280
1281		/* Initiate decryption of block state to previous
1282		 * block_state-XOR-M[n]
1283		 */
1284		hw_desc_init(&desc[idx]);
1285		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1286			     CC_AES_BLOCK_SIZE, NS_BIT);
1287		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1288			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
1289		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1290		idx++;
1291
1292		/* Memory Barrier: wait for axi write to complete */
1293		hw_desc_init(&desc[idx]);
1294		set_din_no_dma(&desc[idx], 0, 0xfffff0);
1295		set_dout_no_dma(&desc[idx], 0, 0, 1);
1296		idx++;
1297	}
1298
1299	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1300		cc_setup_xcbc(req, desc, &idx);
1301	else
1302		cc_setup_cmac(req, desc, &idx);
1303
1304	if (state->xcbc_count == 0) {
1305		hw_desc_init(&desc[idx]);
1306		set_cipher_mode(&desc[idx], ctx->hw_mode);
1307		set_key_size_aes(&desc[idx], key_len);
1308		set_cmac_size0_mode(&desc[idx]);
1309		set_flow_mode(&desc[idx], S_DIN_to_AES);
1310		idx++;
1311	} else if (rem_cnt > 0) {
1312		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1313	} else {
1314		hw_desc_init(&desc[idx]);
1315		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1316		set_flow_mode(&desc[idx], DIN_AES_DOUT);
1317		idx++;
1318	}
1319
1320	/* Get final MAC result */
1321	hw_desc_init(&desc[idx]);
1322	/* TODO */
1323	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1324		      digestsize, NS_BIT, 1);
1325	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1326	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1327	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1328	set_cipher_mode(&desc[idx], ctx->hw_mode);
1329	idx++;
1330
1331	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1332	if (rc != -EINPROGRESS && rc != -EBUSY) {
1333		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1334		cc_unmap_hash_request(dev, state, req->src, true);
1335		cc_unmap_result(dev, state, digestsize, req->result);
1336		cc_unmap_req(dev, state, ctx);
1337	}
1338	return rc;
1339}
1340
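/*
 * MAC finup: with no new data but earlier updates pending this simply
 * degenerates to cc_mac_final(); otherwise the remaining data (or a
 * "size 0" descriptor for an entirely empty message) is pushed through in
 * one pass and the MAC is written straight to the result buffer.
 */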
1341static int cc_mac_finup(struct ahash_request *req)
1342{
1343	struct ahash_req_ctx *state = ahash_request_ctx(req);
1344	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1345	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1346	struct device *dev = drvdata_to_dev(ctx->drvdata);
1347	struct cc_crypto_req cc_req = {};
1348	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1349	int idx = 0;
1350	int rc = 0;
1351	u32 key_len = 0;
1352	u32 digestsize = crypto_ahash_digestsize(tfm);
1353	gfp_t flags = cc_gfp_flags(&req->base);
1354
1355	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1356	if (state->xcbc_count > 0 && req->nbytes == 0) {
1357		dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
1358		return cc_mac_final(req);
1359	}
1360
1361	if (cc_map_req(dev, state, ctx)) {
1362		dev_err(dev, "map_ahash_source() failed\n");
1363		return -EINVAL;
1364	}
1365
1366	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1367				      req->nbytes, 1, flags)) {
1368		dev_err(dev, "map_ahash_request_final() failed\n");
1369		cc_unmap_req(dev, state, ctx);
1370		return -ENOMEM;
1371	}
1372	if (cc_map_result(dev, state, digestsize)) {
1373		dev_err(dev, "map_ahash_digest() failed\n");
1374		cc_unmap_hash_request(dev, state, req->src, true);
1375		cc_unmap_req(dev, state, ctx);
1376		return -ENOMEM;
1377	}
1378
1379	/* Setup request structure */
1380	cc_req.user_cb = (void *)cc_hash_complete;
1381	cc_req.user_arg = (void *)req;
1382
1383	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1384		key_len = CC_AES_128_BIT_KEY_SIZE;
1385		cc_setup_xcbc(req, desc, &idx);
1386	} else {
1387		key_len = ctx->key_params.keylen;
1388		cc_setup_cmac(req, desc, &idx);
1389	}
1390
1391	if (req->nbytes == 0) {
1392		hw_desc_init(&desc[idx]);
1393		set_cipher_mode(&desc[idx], ctx->hw_mode);
1394		set_key_size_aes(&desc[idx], key_len);
1395		set_cmac_size0_mode(&desc[idx]);
1396		set_flow_mode(&desc[idx], S_DIN_to_AES);
1397		idx++;
1398	} else {
1399		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1400	}
1401
1402	/* Get final MAC result */
1403	hw_desc_init(&desc[idx]);
1404	/* TODO */
1405	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1406		      digestsize, NS_BIT, 1);
1407	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1408	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1409	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1410	set_cipher_mode(&desc[idx], ctx->hw_mode);
1411	idx++;
1412
1413	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1414	if (rc != -EINPROGRESS && rc != -EBUSY) {
1415		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1416		cc_unmap_hash_request(dev, state, req->src, true);
1417		cc_unmap_result(dev, state, digestsize, req->result);
1418		cc_unmap_req(dev, state, ctx);
1419	}
1420	return rc;
1421}
1422
1423static int cc_mac_digest(struct ahash_request *req)
1424{
1425	struct ahash_req_ctx *state = ahash_request_ctx(req);
1426	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1427	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1428	struct device *dev = drvdata_to_dev(ctx->drvdata);
1429	u32 digestsize = crypto_ahash_digestsize(tfm);
1430	struct cc_crypto_req cc_req = {};
1431	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1432	u32 key_len;
1433	unsigned int idx = 0;
1434	int rc;
1435	gfp_t flags = cc_gfp_flags(&req->base);
1436
 1437	dev_dbg(dev, "===== digest mac (%d) ====\n", req->nbytes);
1438
1439	cc_init_req(dev, state, ctx);
1440
1441	if (cc_map_req(dev, state, ctx)) {
1442		dev_err(dev, "map_ahash_source() failed\n");
1443		return -ENOMEM;
1444	}
1445	if (cc_map_result(dev, state, digestsize)) {
1446		dev_err(dev, "map_ahash_digest() failed\n");
1447		cc_unmap_req(dev, state, ctx);
1448		return -ENOMEM;
1449	}
1450
1451	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1452				      req->nbytes, 1, flags)) {
1453		dev_err(dev, "map_ahash_request_final() failed\n");
1454		cc_unmap_req(dev, state, ctx);
1455		return -ENOMEM;
1456	}
1457
1458	/* Setup request structure */
1459	cc_req.user_cb = (void *)cc_digest_complete;
1460	cc_req.user_arg = (void *)req;
1461
1462	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1463		key_len = CC_AES_128_BIT_KEY_SIZE;
1464		cc_setup_xcbc(req, desc, &idx);
1465	} else {
1466		key_len = ctx->key_params.keylen;
1467		cc_setup_cmac(req, desc, &idx);
1468	}
1469
1470	if (req->nbytes == 0) {
1471		hw_desc_init(&desc[idx]);
1472		set_cipher_mode(&desc[idx], ctx->hw_mode);
1473		set_key_size_aes(&desc[idx], key_len);
1474		set_cmac_size0_mode(&desc[idx]);
1475		set_flow_mode(&desc[idx], S_DIN_to_AES);
1476		idx++;
1477	} else {
1478		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1479	}
1480
1481	/* Get final MAC result */
1482	hw_desc_init(&desc[idx]);
1483	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1484		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
1485	set_queue_last_ind(ctx->drvdata, &desc[idx]);
1486	set_flow_mode(&desc[idx], S_AES_to_DOUT);
1487	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1488	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1489	set_cipher_mode(&desc[idx], ctx->hw_mode);
1490	idx++;
1491
1492	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1493	if (rc != -EINPROGRESS && rc != -EBUSY) {
1494		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1495		cc_unmap_hash_request(dev, state, req->src, true);
1496		cc_unmap_result(dev, state, digestsize, req->result);
1497		cc_unmap_req(dev, state, ctx);
1498	}
1499	return rc;
1500}
1501
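/*
 * export/import serialize the partial-hash state into the flat blob whose
 * size is advertised via CC_STATE_SIZE(): a CC_EXPORT_MAGIC marker, the
 * intermediate digest, the running length counter, and finally the count
 * and contents of any bytes still buffered for the next block.
 * Illustrative layout only (the code writes the fields sequentially, not
 * through a struct):
 *
 *	u32 magic;                      CC_EXPORT_MAGIC
 *	u8  digest[inter_digestsize];   intermediate digest
 *	u8  len[hash_len_sz];           running length counter
 *	u32 buf_cnt;                    number of buffered bytes
 *	u8  buf[buf_cnt];               buffered partial block
 */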
1502static int cc_hash_export(struct ahash_request *req, void *out)
1503{
1504	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1505	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1506	struct ahash_req_ctx *state = ahash_request_ctx(req);
1507	u8 *curr_buff = cc_hash_buf(state);
1508	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1509	const u32 tmp = CC_EXPORT_MAGIC;
1510
1511	memcpy(out, &tmp, sizeof(u32));
1512	out += sizeof(u32);
1513
1514	memcpy(out, state->digest_buff, ctx->inter_digestsize);
1515	out += ctx->inter_digestsize;
1516
1517	memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
1518	out += ctx->drvdata->hash_len_sz;
1519
1520	memcpy(out, &curr_buff_cnt, sizeof(u32));
1521	out += sizeof(u32);
1522
1523	memcpy(out, curr_buff, curr_buff_cnt);
1524
1525	return 0;
1526}
1527
1528static int cc_hash_import(struct ahash_request *req, const void *in)
1529{
1530	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1531	struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1532	struct device *dev = drvdata_to_dev(ctx->drvdata);
1533	struct ahash_req_ctx *state = ahash_request_ctx(req);
1534	u32 tmp;
1535
1536	memcpy(&tmp, in, sizeof(u32));
1537	if (tmp != CC_EXPORT_MAGIC)
1538		return -EINVAL;
1539	in += sizeof(u32);
1540
1541	cc_init_req(dev, state, ctx);
1542
1543	memcpy(state->digest_buff, in, ctx->inter_digestsize);
1544	in += ctx->inter_digestsize;
1545
1546	memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
1547	in += ctx->drvdata->hash_len_sz;
1548
1549	/* Sanity check the data as much as possible */
1550	memcpy(&tmp, in, sizeof(u32));
1551	if (tmp > CC_MAX_HASH_BLCK_SIZE)
1552		return -EINVAL;
1553	in += sizeof(u32);
1554
1555	state->buf_cnt[0] = tmp;
1556	memcpy(state->buffers[0], in, tmp);
1557
1558	return 0;
1559}
1560
1561struct cc_hash_template {
1562	char name[CRYPTO_MAX_ALG_NAME];
1563	char driver_name[CRYPTO_MAX_ALG_NAME];
1564	char mac_name[CRYPTO_MAX_ALG_NAME];
1565	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1566	unsigned int blocksize;
1567	bool synchronize;
1568	struct ahash_alg template_ahash;
1569	int hash_mode;
1570	int hw_mode;
1571	int inter_digestsize;
1572	struct cc_drvdata *drvdata;
1573	u32 min_hw_rev;
1574};
1575
1576#define CC_STATE_SIZE(_x) \
1577	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1578
1579/* hash descriptors */
1580static struct cc_hash_template driver_hash[] = {
 1581	/* Asynchronous hash templates */
1582	{
1583		.name = "sha1",
1584		.driver_name = "sha1-ccree",
1585		.mac_name = "hmac(sha1)",
1586		.mac_driver_name = "hmac-sha1-ccree",
1587		.blocksize = SHA1_BLOCK_SIZE,
1588		.synchronize = false,
1589		.template_ahash = {
1590			.init = cc_hash_init,
1591			.update = cc_hash_update,
1592			.final = cc_hash_final,
1593			.finup = cc_hash_finup,
1594			.digest = cc_hash_digest,
1595			.export = cc_hash_export,
1596			.import = cc_hash_import,
1597			.setkey = cc_hash_setkey,
1598			.halg = {
1599				.digestsize = SHA1_DIGEST_SIZE,
1600				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1601			},
1602		},
1603		.hash_mode = DRV_HASH_SHA1,
1604		.hw_mode = DRV_HASH_HW_SHA1,
1605		.inter_digestsize = SHA1_DIGEST_SIZE,
1606		.min_hw_rev = CC_HW_REV_630,
1607	},
1608	{
1609		.name = "sha256",
1610		.driver_name = "sha256-ccree",
1611		.mac_name = "hmac(sha256)",
1612		.mac_driver_name = "hmac-sha256-ccree",
1613		.blocksize = SHA256_BLOCK_SIZE,
1614		.template_ahash = {
1615			.init = cc_hash_init,
1616			.update = cc_hash_update,
1617			.final = cc_hash_final,
1618			.finup = cc_hash_finup,
1619			.digest = cc_hash_digest,
1620			.export = cc_hash_export,
1621			.import = cc_hash_import,
1622			.setkey = cc_hash_setkey,
1623			.halg = {
1624				.digestsize = SHA256_DIGEST_SIZE,
1625				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1626			},
1627		},
1628		.hash_mode = DRV_HASH_SHA256,
1629		.hw_mode = DRV_HASH_HW_SHA256,
1630		.inter_digestsize = SHA256_DIGEST_SIZE,
1631		.min_hw_rev = CC_HW_REV_630,
1632	},
1633	{
1634		.name = "sha224",
1635		.driver_name = "sha224-ccree",
1636		.mac_name = "hmac(sha224)",
1637		.mac_driver_name = "hmac-sha224-ccree",
1638		.blocksize = SHA224_BLOCK_SIZE,
1639		.template_ahash = {
1640			.init = cc_hash_init,
1641			.update = cc_hash_update,
1642			.final = cc_hash_final,
1643			.finup = cc_hash_finup,
1644			.digest = cc_hash_digest,
1645			.export = cc_hash_export,
1646			.import = cc_hash_import,
1647			.setkey = cc_hash_setkey,
1648			.halg = {
1649				.digestsize = SHA224_DIGEST_SIZE,
1650				.statesize = CC_STATE_SIZE(SHA224_DIGEST_SIZE),
1651			},
1652		},
1653		.hash_mode = DRV_HASH_SHA224,
1654		.hw_mode = DRV_HASH_HW_SHA256,
1655		.inter_digestsize = SHA256_DIGEST_SIZE,
1656		.min_hw_rev = CC_HW_REV_630,
1657	},
1658	{
1659		.name = "sha384",
1660		.driver_name = "sha384-ccree",
1661		.mac_name = "hmac(sha384)",
1662		.mac_driver_name = "hmac-sha384-ccree",
1663		.blocksize = SHA384_BLOCK_SIZE,
1664		.template_ahash = {
1665			.init = cc_hash_init,
1666			.update = cc_hash_update,
1667			.final = cc_hash_final,
1668			.finup = cc_hash_finup,
1669			.digest = cc_hash_digest,
1670			.export = cc_hash_export,
1671			.import = cc_hash_import,
1672			.setkey = cc_hash_setkey,
1673			.halg = {
1674				.digestsize = SHA384_DIGEST_SIZE,
1675				.statesize = CC_STATE_SIZE(SHA384_DIGEST_SIZE),
1676			},
1677		},
1678		.hash_mode = DRV_HASH_SHA384,
1679		.hw_mode = DRV_HASH_HW_SHA512,
1680		.inter_digestsize = SHA512_DIGEST_SIZE,
1681		.min_hw_rev = CC_HW_REV_712,
1682	},
1683	{
1684		.name = "sha512",
1685		.driver_name = "sha512-ccree",
1686		.mac_name = "hmac(sha512)",
1687		.mac_driver_name = "hmac-sha512-ccree",
1688		.blocksize = SHA512_BLOCK_SIZE,
1689		.template_ahash = {
1690			.init = cc_hash_init,
1691			.update = cc_hash_update,
1692			.final = cc_hash_final,
1693			.finup = cc_hash_finup,
1694			.digest = cc_hash_digest,
1695			.export = cc_hash_export,
1696			.import = cc_hash_import,
1697			.setkey = cc_hash_setkey,
1698			.halg = {
1699				.digestsize = SHA512_DIGEST_SIZE,
1700				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1701			},
1702		},
1703		.hash_mode = DRV_HASH_SHA512,
1704		.hw_mode = DRV_HASH_HW_SHA512,
1705		.inter_digestsize = SHA512_DIGEST_SIZE,
1706		.min_hw_rev = CC_HW_REV_712,
1707	},
1708	{
1709		.name = "md5",
1710		.driver_name = "md5-ccree",
1711		.mac_name = "hmac(md5)",
1712		.mac_driver_name = "hmac-md5-ccree",
1713		.blocksize = MD5_HMAC_BLOCK_SIZE,
1714		.template_ahash = {
1715			.init = cc_hash_init,
1716			.update = cc_hash_update,
1717			.final = cc_hash_final,
1718			.finup = cc_hash_finup,
1719			.digest = cc_hash_digest,
1720			.export = cc_hash_export,
1721			.import = cc_hash_import,
1722			.setkey = cc_hash_setkey,
1723			.halg = {
1724				.digestsize = MD5_DIGEST_SIZE,
1725				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1726			},
1727		},
1728		.hash_mode = DRV_HASH_MD5,
1729		.hw_mode = DRV_HASH_HW_MD5,
1730		.inter_digestsize = MD5_DIGEST_SIZE,
1731		.min_hw_rev = CC_HW_REV_630,
1732	},
1733	{
1734		.mac_name = "xcbc(aes)",
1735		.mac_driver_name = "xcbc-aes-ccree",
1736		.blocksize = AES_BLOCK_SIZE,
1737		.template_ahash = {
1738			.init = cc_hash_init,
1739			.update = cc_mac_update,
1740			.final = cc_mac_final,
1741			.finup = cc_mac_finup,
1742			.digest = cc_mac_digest,
1743			.setkey = cc_xcbc_setkey,
1744			.export = cc_hash_export,
1745			.import = cc_hash_import,
1746			.halg = {
1747				.digestsize = AES_BLOCK_SIZE,
1748				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1749			},
1750		},
1751		.hash_mode = DRV_HASH_NULL,
1752		.hw_mode = DRV_CIPHER_XCBC_MAC,
1753		.inter_digestsize = AES_BLOCK_SIZE,
1754		.min_hw_rev = CC_HW_REV_630,
1755	},
1756	{
1757		.mac_name = "cmac(aes)",
1758		.mac_driver_name = "cmac-aes-ccree",
1759		.blocksize = AES_BLOCK_SIZE,
1760		.template_ahash = {
1761			.init = cc_hash_init,
1762			.update = cc_mac_update,
1763			.final = cc_mac_final,
1764			.finup = cc_mac_finup,
1765			.digest = cc_mac_digest,
1766			.setkey = cc_cmac_setkey,
1767			.export = cc_hash_export,
1768			.import = cc_hash_import,
1769			.halg = {
1770				.digestsize = AES_BLOCK_SIZE,
1771				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1772			},
1773		},
1774		.hash_mode = DRV_HASH_NULL,
1775		.hw_mode = DRV_CIPHER_CMAC,
1776		.inter_digestsize = AES_BLOCK_SIZE,
1777		.min_hw_rev = CC_HW_REV_630,
1778	},
1779};
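
/*
 * The templates above are registered with the kernel crypto API, so they
 * can also be exercised from user space through AF_ALG (assuming
 * CONFIG_CRYPTO_USER_API_HASH is enabled; whether ccree or a software
 * implementation ends up servicing the request depends on priority). A
 * minimal, hypothetical user-space sketch, with error handling omitted and
 * not part of this driver:
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_alg.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_alg sa = {
 *			.salg_family = AF_ALG,
 *			.salg_type   = "hash",
 *			.salg_name   = "hmac(sha256)",
 *		};
 *		static const unsigned char key[32];	// all-zero demo key
 *		unsigned char digest[32];
 *		int tfmfd, opfd, i;
 *
 *		tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *		bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *		setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
 *		opfd = accept(tfmfd, NULL, 0);
 *
 *		write(opfd, "hello", 5);		// send the message
 *		read(opfd, digest, sizeof(digest));	// fetch the HMAC
 *
 *		for (i = 0; i < (int)sizeof(digest); i++)
 *			printf("%02x", digest[i]);
 *		printf("\n");
 *		close(opfd);
 *		close(tfmfd);
 *		return 0;
 *	}
 */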
1780
1781static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1782					     struct device *dev, bool keyed)
1783{
1784	struct cc_hash_alg *t_crypto_alg;
1785	struct crypto_alg *alg;
1786	struct ahash_alg *halg;
1787
1788	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
1789	if (!t_crypto_alg)
1790		return ERR_PTR(-ENOMEM);
1791
1792	t_crypto_alg->ahash_alg = template->template_ahash;
1793	halg = &t_crypto_alg->ahash_alg;
1794	alg = &halg->halg.base;
1795
1796	if (keyed) {
1797		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1798			 template->mac_name);
1799		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1800			 template->mac_driver_name);
1801	} else {
1802		halg->setkey = NULL;
1803		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1804			 template->name);
1805		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1806			 template->driver_name);
1807	}
1808	alg->cra_module = THIS_MODULE;
1809	alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1810	alg->cra_priority = CC_CRA_PRIO;
1811	alg->cra_blocksize = template->blocksize;
1812	alg->cra_alignmask = 0;
1813	alg->cra_exit = cc_cra_exit;
1814
1815	alg->cra_init = cc_cra_init;
1816	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH |
1817			CRYPTO_ALG_KERN_DRIVER_ONLY;
1818	alg->cra_type = &crypto_ahash_type;
1819
1820	t_crypto_alg->hash_mode = template->hash_mode;
1821	t_crypto_alg->hw_mode = template->hw_mode;
1822	t_crypto_alg->inter_digestsize = template->inter_digestsize;
1823
1824	return t_crypto_alg;
1825}
1826
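/*
 * Program the shared constants into SRAM in a fixed order: the generic
 * digest-length words, then (only on HW rev >= 712) the SHA384/SHA512
 * length words, followed by the larval digests for MD5, SHA1, SHA224,
 * SHA256 and, again only on newer HW, SHA384 and SHA512.
 * cc_larval_digest_addr() relies on exactly this ordering when computing
 * per-mode offsets.
 */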
1827int cc_init_hash_sram(struct cc_drvdata *drvdata)
1828{
1829	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1830	cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
1831	unsigned int larval_seq_len = 0;
1832	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1833	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1834	int rc = 0;
1835
1836	/* Copy-to-sram digest-len */
1837	cc_set_sram_desc(digest_len_init, sram_buff_ofs,
1838			 ARRAY_SIZE(digest_len_init), larval_seq,
1839			 &larval_seq_len);
1840	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1841	if (rc)
1842		goto init_digest_const_err;
1843
1844	sram_buff_ofs += sizeof(digest_len_init);
1845	larval_seq_len = 0;
1846
1847	if (large_sha_supported) {
1848		/* Copy-to-sram digest-len for sha384/512 */
1849		cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
1850				 ARRAY_SIZE(digest_len_sha512_init),
1851				 larval_seq, &larval_seq_len);
1852		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1853		if (rc)
1854			goto init_digest_const_err;
1855
1856		sram_buff_ofs += sizeof(digest_len_sha512_init);
1857		larval_seq_len = 0;
1858	}
1859
1860	/* The initial digests offset */
1861	hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1862
1863	/* Copy-to-sram initial SHA* digests */
1864	cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
1865			 larval_seq, &larval_seq_len);
1866	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1867	if (rc)
1868		goto init_digest_const_err;
1869	sram_buff_ofs += sizeof(md5_init);
1870	larval_seq_len = 0;
1871
1872	cc_set_sram_desc(sha1_init, sram_buff_ofs,
1873			 ARRAY_SIZE(sha1_init), larval_seq,
1874			 &larval_seq_len);
1875	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1876	if (rc)
1877		goto init_digest_const_err;
1878	sram_buff_ofs += sizeof(sha1_init);
1879	larval_seq_len = 0;
1880
1881	cc_set_sram_desc(sha224_init, sram_buff_ofs,
1882			 ARRAY_SIZE(sha224_init), larval_seq,
1883			 &larval_seq_len);
1884	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1885	if (rc)
1886		goto init_digest_const_err;
1887	sram_buff_ofs += sizeof(sha224_init);
1888	larval_seq_len = 0;
1889
1890	cc_set_sram_desc(sha256_init, sram_buff_ofs,
1891			 ARRAY_SIZE(sha256_init), larval_seq,
1892			 &larval_seq_len);
1893	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1894	if (rc)
1895		goto init_digest_const_err;
1896	sram_buff_ofs += sizeof(sha256_init);
1897	larval_seq_len = 0;
1898
1899	if (large_sha_supported) {
1900		cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
1901				 (ARRAY_SIZE(sha384_init) * 2), larval_seq,
1902				 &larval_seq_len);
1903		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1904		if (rc)
1905			goto init_digest_const_err;
1906		sram_buff_ofs += sizeof(sha384_init);
1907		larval_seq_len = 0;
1908
1909		cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
1910				 (ARRAY_SIZE(sha512_init) * 2), larval_seq,
1911				 &larval_seq_len);
1912		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1913		if (rc)
1914			goto init_digest_const_err;
1915	}
1916
1917init_digest_const_err:
1918	return rc;
1919}
1920
1921static void __init cc_swap_dwords(u32 *buf, unsigned long size)
1922{
1923	int i;
1924	u32 tmp;
1925
1926	for (i = 0; i < size; i += 2) {
1927		tmp = buf[i];
1928		buf[i] = buf[i + 1];
1929		buf[i + 1] = tmp;
1930	}
1931}
1932
1933/*
1934 * Due to the way the HW works we need to swap every
1935 * double word in the SHA384 and SHA512 larval hashes
1936 */
1937void __init cc_hash_global_init(void)
1938{
1939	cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
1940	cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
1941}
1942
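/*
 * Allocate the SRAM area, program the constants and register the
 * algorithms from driver_hash[]: every template gets its keyed (hmac/mac)
 * variant, the plain hash variant is additionally registered for non-MAC
 * entries, and templates whose min_hw_rev exceeds the detected revision
 * are skipped entirely.
 */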
1943int cc_hash_alloc(struct cc_drvdata *drvdata)
1944{
1945	struct cc_hash_handle *hash_handle;
1946	cc_sram_addr_t sram_buff;
1947	u32 sram_size_to_alloc;
1948	struct device *dev = drvdata_to_dev(drvdata);
1949	int rc = 0;
1950	int alg;
1951
1952	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
1953	if (!hash_handle)
1954		return -ENOMEM;
1955
1956	INIT_LIST_HEAD(&hash_handle->hash_list);
1957	drvdata->hash_handle = hash_handle;
1958
1959	sram_size_to_alloc = sizeof(digest_len_init) +
1960			sizeof(md5_init) +
1961			sizeof(sha1_init) +
1962			sizeof(sha224_init) +
1963			sizeof(sha256_init);
1964
1965	if (drvdata->hw_rev >= CC_HW_REV_712)
1966		sram_size_to_alloc += sizeof(digest_len_sha512_init) +
1967			sizeof(sha384_init) + sizeof(sha512_init);
1968
1969	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
1970	if (sram_buff == NULL_SRAM_ADDR) {
1971		dev_err(dev, "SRAM pool exhausted\n");
1972		rc = -ENOMEM;
1973		goto fail;
1974	}
1975
1976	/* The initial digest-len offset */
1977	hash_handle->digest_len_sram_addr = sram_buff;
1978
 1979	/* Must be set before the alg registration as it is used there */
1980	rc = cc_init_hash_sram(drvdata);
1981	if (rc) {
1982		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
1983		goto fail;
1984	}
1985
1986	/* ahash registration */
1987	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
1988		struct cc_hash_alg *t_alg;
1989		int hw_mode = driver_hash[alg].hw_mode;
1990
1991		/* We either support both HASH and MAC or none */
1992		if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
1993			continue;
1994
1995		/* register hmac version */
1996		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
1997		if (IS_ERR(t_alg)) {
1998			rc = PTR_ERR(t_alg);
1999			dev_err(dev, "%s alg allocation failed\n",
2000				driver_hash[alg].driver_name);
2001			goto fail;
2002		}
2003		t_alg->drvdata = drvdata;
2004
2005		rc = crypto_register_ahash(&t_alg->ahash_alg);
2006		if (rc) {
2007			dev_err(dev, "%s alg registration failed\n",
2008				driver_hash[alg].driver_name);
2009			kfree(t_alg);
2010			goto fail;
2011		} else {
2012			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2013		}
2014
2015		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
2016		    hw_mode == DRV_CIPHER_CMAC)
2017			continue;
2018
2019		/* register hash version */
2020		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
2021		if (IS_ERR(t_alg)) {
2022			rc = PTR_ERR(t_alg);
2023			dev_err(dev, "%s alg allocation failed\n",
2024				driver_hash[alg].driver_name);
2025			goto fail;
2026		}
2027		t_alg->drvdata = drvdata;
2028
2029		rc = crypto_register_ahash(&t_alg->ahash_alg);
2030		if (rc) {
2031			dev_err(dev, "%s alg registration failed\n",
2032				driver_hash[alg].driver_name);
2033			kfree(t_alg);
2034			goto fail;
2035		} else {
2036			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2037		}
2038	}
2039
2040	return 0;
2041
2042fail:
2043	kfree(drvdata->hash_handle);
2044	drvdata->hash_handle = NULL;
2045	return rc;
2046}
2047
2048int cc_hash_free(struct cc_drvdata *drvdata)
2049{
2050	struct cc_hash_alg *t_hash_alg, *hash_n;
2051	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2052
2053	if (hash_handle) {
2054		list_for_each_entry_safe(t_hash_alg, hash_n,
2055					 &hash_handle->hash_list, entry) {
2056			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2057			list_del(&t_hash_alg->entry);
2058			kfree(t_hash_alg);
2059		}
2060
2061		kfree(hash_handle);
2062		drvdata->hash_handle = NULL;
2063	}
2064	return 0;
2065}
2066
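/*
 * Common XCBC-MAC setup sequence: K1 is loaded as the AES key, K2 and K3
 * (all three derived in cc_xcbc_setkey()) go into STATE1/STATE2, and the
 * running MAC value from the request context is loaded into STATE0.
 */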
2067static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2068			  unsigned int *seq_size)
2069{
2070	unsigned int idx = *seq_size;
2071	struct ahash_req_ctx *state = ahash_request_ctx(areq);
2072	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2073	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2074
2075	/* Setup XCBC MAC K1 */
2076	hw_desc_init(&desc[idx]);
2077	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2078					    XCBC_MAC_K1_OFFSET),
2079		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2080	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2081	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2082	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2083	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2084	set_flow_mode(&desc[idx], S_DIN_to_AES);
2085	idx++;
2086
2087	/* Setup XCBC MAC K2 */
2088	hw_desc_init(&desc[idx]);
2089	set_din_type(&desc[idx], DMA_DLLI,
2090		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2091		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2092	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2093	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2094	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2095	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2096	set_flow_mode(&desc[idx], S_DIN_to_AES);
2097	idx++;
2098
2099	/* Setup XCBC MAC K3 */
2100	hw_desc_init(&desc[idx]);
2101	set_din_type(&desc[idx], DMA_DLLI,
2102		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2103		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2104	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2105	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2106	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2107	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2108	set_flow_mode(&desc[idx], S_DIN_to_AES);
2109	idx++;
2110
2111	/* Loading MAC state */
2112	hw_desc_init(&desc[idx]);
2113	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2114		     CC_AES_BLOCK_SIZE, NS_BIT);
2115	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2116	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2117	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2118	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2119	set_flow_mode(&desc[idx], S_DIN_to_AES);
2120	idx++;
2121	*seq_size = idx;
2122}
2123
2124static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2125			  unsigned int *seq_size)
2126{
2127	unsigned int idx = *seq_size;
2128	struct ahash_req_ctx *state = ahash_request_ctx(areq);
2129	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2130	struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
2131
2132	/* Setup CMAC Key */
2133	hw_desc_init(&desc[idx]);
2134	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2135		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2136		      ctx->key_params.keylen), NS_BIT);
2137	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2138	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2139	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2140	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2141	set_flow_mode(&desc[idx], S_DIN_to_AES);
2142	idx++;
2143
2144	/* Load MAC state */
2145	hw_desc_init(&desc[idx]);
2146	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2147		     CC_AES_BLOCK_SIZE, NS_BIT);
2148	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2149	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2150	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2151	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2152	set_flow_mode(&desc[idx], S_DIN_to_AES);
2153	idx++;
2154	*seq_size = idx;
2155}
2156
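/*
 * Queue the data-processing descriptors for the current request: a single
 * contiguous buffer (DLLI) is fed to the engine directly, a scatter-gather
 * mapping first BYPASSes its MLLI table into SRAM and is then processed
 * from there, and a NULL mapping means there is nothing to feed at all.
 */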
2157static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2158			struct cc_hash_ctx *ctx, unsigned int flow_mode,
2159			struct cc_hw_desc desc[], bool is_not_last_data,
2160			unsigned int *seq_size)
2161{
2162	unsigned int idx = *seq_size;
2163	struct device *dev = drvdata_to_dev(ctx->drvdata);
2164
2165	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2166		hw_desc_init(&desc[idx]);
2167		set_din_type(&desc[idx], DMA_DLLI,
2168			     sg_dma_address(areq_ctx->curr_sg),
2169			     areq_ctx->curr_sg->length, NS_BIT);
2170		set_flow_mode(&desc[idx], flow_mode);
2171		idx++;
2172	} else {
2173		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2174			dev_dbg(dev, " NULL mode\n");
2175			/* nothing to build */
2176			return;
2177		}
2178		/* bypass */
2179		hw_desc_init(&desc[idx]);
2180		set_din_type(&desc[idx], DMA_DLLI,
2181			     areq_ctx->mlli_params.mlli_dma_addr,
2182			     areq_ctx->mlli_params.mlli_len, NS_BIT);
2183		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2184			      areq_ctx->mlli_params.mlli_len);
2185		set_flow_mode(&desc[idx], BYPASS);
2186		idx++;
2187		/* process */
2188		hw_desc_init(&desc[idx]);
2189		set_din_type(&desc[idx], DMA_MLLI,
2190			     ctx->drvdata->mlli_sram_addr,
2191			     areq_ctx->mlli_nents, NS_BIT);
2192		set_flow_mode(&desc[idx], flow_mode);
2193		idx++;
2194	}
2195	if (is_not_last_data)
2196		set_din_not_last_indication(&desc[(idx - 1)]);
2197	/* return updated desc sequence size */
2198	*seq_size = idx;
2199}
2200
2201static const void *cc_larval_digest(struct device *dev, u32 mode)
2202{
2203	switch (mode) {
2204	case DRV_HASH_MD5:
2205		return md5_init;
2206	case DRV_HASH_SHA1:
2207		return sha1_init;
2208	case DRV_HASH_SHA224:
2209		return sha224_init;
2210	case DRV_HASH_SHA256:
2211		return sha256_init;
2212	case DRV_HASH_SHA384:
2213		return sha384_init;
2214	case DRV_HASH_SHA512:
2215		return sha512_init;
2216	default:
2217		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2218		return md5_init;
2219	}
2220}
2221
 2222	/*!
 2223	 * Gets the address of the initial digest in SRAM
 2224	 * according to the given hash mode
 2225	 *
 2226	 * \param drvdata The driver private data
 2227	 * \param mode The hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
 2228	 *
 2229	 * \return The SRAM address of the initial (larval) digest
 2230	 */
2231cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
2232{
2233	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2234	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2235	struct device *dev = drvdata_to_dev(_drvdata);
2236
2237	switch (mode) {
2238	case DRV_HASH_NULL:
2239		break; /*Ignore*/
2240	case DRV_HASH_MD5:
2241		return (hash_handle->larval_digest_sram_addr);
2242	case DRV_HASH_SHA1:
2243		return (hash_handle->larval_digest_sram_addr +
2244			sizeof(md5_init));
2245	case DRV_HASH_SHA224:
2246		return (hash_handle->larval_digest_sram_addr +
2247			sizeof(md5_init) +
2248			sizeof(sha1_init));
2249	case DRV_HASH_SHA256:
2250		return (hash_handle->larval_digest_sram_addr +
2251			sizeof(md5_init) +
2252			sizeof(sha1_init) +
2253			sizeof(sha224_init));
2254	case DRV_HASH_SHA384:
2255		return (hash_handle->larval_digest_sram_addr +
2256			sizeof(md5_init) +
2257			sizeof(sha1_init) +
2258			sizeof(sha224_init) +
2259			sizeof(sha256_init));
2260	case DRV_HASH_SHA512:
2261		return (hash_handle->larval_digest_sram_addr +
2262			sizeof(md5_init) +
2263			sizeof(sha1_init) +
2264			sizeof(sha224_init) +
2265			sizeof(sha256_init) +
2266			sizeof(sha384_init));
2267	default:
2268		dev_err(dev, "Invalid hash mode (%d)\n", mode);
2269	}
2270
 2271	/* Valid but wrong value, returned to avoid a kernel crash */
2272	return hash_handle->larval_digest_sram_addr;
2273}
2274
2275cc_sram_addr_t
2276cc_digest_len_addr(void *drvdata, u32 mode)
2277{
2278	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2279	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2280	cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
2281
2282	switch (mode) {
2283	case DRV_HASH_SHA1:
2284	case DRV_HASH_SHA224:
2285	case DRV_HASH_SHA256:
2286	case DRV_HASH_MD5:
2287		return digest_len_addr;
2288#if (CC_DEV_SHA_MAX > 256)
2289	case DRV_HASH_SHA384:
2290	case DRV_HASH_SHA512:
2291		return  digest_len_addr + sizeof(digest_len_init);
2292#endif
2293	default:
 2294		return digest_len_addr; /* to avoid a kernel crash */
2295	}
2296}