   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Copyright (c) 2021 Aspeed Technology Inc.
   4 */
   5
   6#include "aspeed-hace.h"
   7#include <crypto/engine.h>
   8#include <crypto/hmac.h>
   9#include <crypto/internal/hash.h>
  10#include <crypto/scatterwalk.h>
  11#include <crypto/sha1.h>
  12#include <crypto/sha2.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/err.h>
  15#include <linux/io.h>
  16#include <linux/kernel.h>
  17#include <linux/string.h>
  18
  19#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
  20#define AHASH_DBG(h, fmt, ...)	\
  21	dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
  22#else
  23#define AHASH_DBG(h, fmt, ...)	\
  24	dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
  25#endif
  26
   27/* Initialization vectors for the SHA family */
  28static const __be32 sha1_iv[8] = {
  29	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
  30	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
  31	cpu_to_be32(SHA1_H4), 0, 0, 0
  32};
  33
  34static const __be32 sha224_iv[8] = {
  35	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
  36	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
  37	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
  38	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
  39};
  40
  41static const __be32 sha256_iv[8] = {
  42	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
  43	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
  44	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
  45	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
  46};
  47
  48static const __be64 sha384_iv[8] = {
  49	cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1),
  50	cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3),
  51	cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5),
  52	cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7)
  53};
  54
  55static const __be64 sha512_iv[8] = {
  56	cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1),
  57	cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3),
  58	cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5),
  59	cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7)
  60};
  61
   62/* The purpose of this padding is to ensure that the padded message is a
   63 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
   64 * The bit "1" is appended at the end of the message followed by
   65 * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or a
   66 * 128-bit block (SHA384/SHA512) holding the message length in bits
   67 * is appended.
   68 *
   69 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
   70 *  - if message length < 56 bytes then padlen = 56 - message length
   71 *  - else padlen = 64 + 56 - message length
   72 *
   73 * For SHA384/SHA512, padlen is calculated as follows:
   74 *  - if message length < 112 bytes then padlen = 112 - message length
   75 *  - else padlen = 128 + 112 - message length
   76 */
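/*
 * As a concrete illustration of the rules above (numbers chosen only as
 * an example): hashing a 20-byte message with SHA-256 gives index = 20,
 * so padlen = 56 - 20 = 36; the padded message is then
 * 20 + 36 + 8 = 64 bytes, with the trailing 8 bytes holding the
 * big-endian bit count cpu_to_be64(20 * 8).
 */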
  77static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
  78				      struct aspeed_sham_reqctx *rctx)
  79{
  80	unsigned int index, padlen;
  81	__be64 bits[2];
  82
  83	AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags);
  84
  85	switch (rctx->flags & SHA_FLAGS_MASK) {
  86	case SHA_FLAGS_SHA1:
  87	case SHA_FLAGS_SHA224:
  88	case SHA_FLAGS_SHA256:
  89		bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
  90		index = rctx->bufcnt & 0x3f;
  91		padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
  92		*(rctx->buffer + rctx->bufcnt) = 0x80;
  93		memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
  94		memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
  95		rctx->bufcnt += padlen + 8;
  96		break;
  97	default:
  98		bits[1] = cpu_to_be64(rctx->digcnt[0] << 3);
  99		bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 |
 100				      rctx->digcnt[0] >> 61);
 101		index = rctx->bufcnt & 0x7f;
 102		padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
 103		*(rctx->buffer + rctx->bufcnt) = 0x80;
 104		memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
 105		memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
 106		rctx->bufcnt += padlen + 16;
 107		break;
 108	}
 109}
 110
 111/*
 112 * Prepare DMA buffer before hardware engine
 113 * processing.
 114 */
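/*
 * A worked example of the split below (values are illustrative only):
 * with rctx->bufcnt = 10 leftover bytes and rctx->total = 100 new bytes
 * on SHA-256 (64-byte blocks), length = 110 and remain = 110 % 64 = 46,
 * so 64 bytes are copied into the DMA buffer for the engine and the
 * trailing 46 bytes are kept back in rctx->buffer for the next update.
 */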
 115static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
 116{
 117	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 118	struct ahash_request *req = hash_engine->req;
 119	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 120	int length, remain;
 121
 122	length = rctx->total + rctx->bufcnt;
 123	remain = length % rctx->block_size;
 124
 125	AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
 126
 127	if (rctx->bufcnt)
 128		memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
 129
 130	if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
 131		scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
 132					 rctx->bufcnt, rctx->src_sg,
 133					 rctx->offset, rctx->total - remain, 0);
 134		rctx->offset += rctx->total - remain;
 135
 136	} else {
 137		dev_warn(hace_dev->dev, "Hash data length is too large\n");
 138		return -EINVAL;
 139	}
 140
 141	scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
 142				 rctx->offset, remain, 0);
 143
 144	rctx->bufcnt = remain;
 145	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
 146					       SHA512_DIGEST_SIZE,
 147					       DMA_BIDIRECTIONAL);
 148	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
 149		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
 150		return -ENOMEM;
 151	}
 152
 153	hash_engine->src_length = length - remain;
 154	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
 155	hash_engine->digest_dma = rctx->digest_dma_addr;
 156
 157	return 0;
 158}
 159
 160/*
 161 * Prepare DMA buffer as SG list buffer before
 162 * hardware engine processing.
 163 */
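/*
 * Sketch of the descriptor list built below (layout per struct
 * aspeed_sg_list): entry 0 optionally covers the bytes already buffered
 * in rctx->buffer, the following entries mirror the mapped source
 * scatterlist, and the entry that completes src_length has
 * HASH_SG_LAST_LIST set in its len field so the engine knows where the
 * chain ends.
 */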
 164static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 165{
 166	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 167	struct ahash_request *req = hash_engine->req;
 168	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 169	struct aspeed_sg_list *src_list;
 170	struct scatterlist *s;
 171	int length, remain, sg_len, i;
 172	int rc = 0;
 173
 174	remain = (rctx->total + rctx->bufcnt) % rctx->block_size;
 175	length = rctx->total + rctx->bufcnt - remain;
 176
 177	AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
 178		  "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
 179		  "length", length, "remain", remain);
 180
 181	sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
 182			    DMA_TO_DEVICE);
 183	if (!sg_len) {
 184		dev_warn(hace_dev->dev, "dma_map_sg() src error\n");
 185		rc = -ENOMEM;
 186		goto end;
 187	}
 188
 189	src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
 190	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
 191					       SHA512_DIGEST_SIZE,
 192					       DMA_BIDIRECTIONAL);
 193	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
 194		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
 195		rc = -ENOMEM;
 196		goto free_src_sg;
 197	}
 198
 199	if (rctx->bufcnt != 0) {
 200		u32 phy_addr;
 201		u32 len;
 202
 203		rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
 204						       rctx->buffer,
 205						       rctx->block_size * 2,
 206						       DMA_TO_DEVICE);
 207		if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
 208			dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
 209			rc = -ENOMEM;
 210			goto free_rctx_digest;
 211		}
 212
 213		phy_addr = rctx->buffer_dma_addr;
 214		len = rctx->bufcnt;
 215		length -= len;
 216
 217		/* Last sg list */
 218		if (length == 0)
 219			len |= HASH_SG_LAST_LIST;
 220
 221		src_list[0].phy_addr = cpu_to_le32(phy_addr);
 222		src_list[0].len = cpu_to_le32(len);
 223		src_list++;
 224	}
 225
 226	if (length != 0) {
 227		for_each_sg(rctx->src_sg, s, sg_len, i) {
 228			u32 phy_addr = sg_dma_address(s);
 229			u32 len = sg_dma_len(s);
 230
 231			if (length > len)
 232				length -= len;
 233			else {
 234				/* Last sg list */
 235				len = length;
 236				len |= HASH_SG_LAST_LIST;
 237				length = 0;
 238			}
 239
 240			src_list[i].phy_addr = cpu_to_le32(phy_addr);
 241			src_list[i].len = cpu_to_le32(len);
 242		}
 243	}
 244
 245	if (length != 0) {
 246		rc = -EINVAL;
 247		goto free_rctx_buffer;
 248	}
 249
 250	rctx->offset = rctx->total - remain;
 251	hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
 252	hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
 253	hash_engine->digest_dma = rctx->digest_dma_addr;
 254
 255	return 0;
 256
 257free_rctx_buffer:
 258	if (rctx->bufcnt != 0)
 259		dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
 260				 rctx->block_size * 2, DMA_TO_DEVICE);
 261free_rctx_digest:
 262	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 263			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 264free_src_sg:
 265	dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
 266		     DMA_TO_DEVICE);
 267end:
 268	return rc;
 269}
 270
 271static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev)
 272{
 273	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 274	struct ahash_request *req = hash_engine->req;
 275
 276	AHASH_DBG(hace_dev, "\n");
 277
 278	hash_engine->flags &= ~CRYPTO_FLAGS_BUSY;
 279
 280	crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0);
 281
 282	return 0;
 283}
 284
  285/*
  286 * Copy the digest into the corresponding request result.
  287 * This function is called at the final() stage.
  288 */
 289static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
 290{
 291	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 292	struct ahash_request *req = hash_engine->req;
 293	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 294
 295	AHASH_DBG(hace_dev, "\n");
 296
 297	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 298			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 299
 300	dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
 301			 rctx->block_size * 2, DMA_TO_DEVICE);
 302
 303	memcpy(req->result, rctx->digest, rctx->digsize);
 304
 305	return aspeed_ahash_complete(hace_dev);
 306}
 307
  308/*
  309 * Trigger the hardware engine to process the prepared data.
  310 */
 311static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
 312				     aspeed_hace_fn_t resume)
 313{
 314	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 315	struct ahash_request *req = hash_engine->req;
 316	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 317
 318	AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
 319		  &hash_engine->src_dma, &hash_engine->digest_dma,
 320		  hash_engine->src_length);
 321
 322	rctx->cmd |= HASH_CMD_INT_ENABLE;
 323	hash_engine->resume = resume;
 324
 325	ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
 326	ast_hace_write(hace_dev, hash_engine->digest_dma,
 327		       ASPEED_HACE_HASH_DIGEST_BUFF);
 328	ast_hace_write(hace_dev, hash_engine->digest_dma,
 329		       ASPEED_HACE_HASH_KEY_BUFF);
 330	ast_hace_write(hace_dev, hash_engine->src_length,
 331		       ASPEED_HACE_HASH_DATA_LEN);
 332
  333	/* Memory barrier to ensure all data is set up before the engine starts */
 334	mb();
 335
 336	ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD);
 337
 338	return -EINPROGRESS;
 339}
 340
  341/*
  342 * HMAC resume performs the second hash pass, which
  343 * produces the final HMAC code from the inner hash
  344 * result and the outer key.
  345 */
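/*
 * In HMAC terms this computes the outer hash of
 * H((K ^ opad) || H((K ^ ipad) || m)); the inner hash
 * H((K ^ ipad) || m) is already sitting in rctx->digest when this
 * resume callback runs.
 */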
 346static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
 347{
 348	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 349	struct ahash_request *req = hash_engine->req;
 350	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 351	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 352	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 353	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
 354	int rc = 0;
 355
 356	AHASH_DBG(hace_dev, "\n");
 357
 358	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 359			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 360
 361	dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
 362			 rctx->block_size * 2, DMA_TO_DEVICE);
 363
 364	/* o key pad + hash sum 1 */
 365	memcpy(rctx->buffer, bctx->opad, rctx->block_size);
 366	memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
 367
 368	rctx->bufcnt = rctx->block_size + rctx->digsize;
 369	rctx->digcnt[0] = rctx->block_size + rctx->digsize;
 370
 371	aspeed_ahash_fill_padding(hace_dev, rctx);
 372	memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
 373
 374	rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
 375					       SHA512_DIGEST_SIZE,
 376					       DMA_BIDIRECTIONAL);
 377	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
 378		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
 379		rc = -ENOMEM;
 380		goto end;
 381	}
 382
 383	rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
 384					       rctx->block_size * 2,
 385					       DMA_TO_DEVICE);
 386	if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
 387		dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
 388		rc = -ENOMEM;
 389		goto free_rctx_digest;
 390	}
 391
 392	hash_engine->src_dma = rctx->buffer_dma_addr;
 393	hash_engine->src_length = rctx->bufcnt;
 394	hash_engine->digest_dma = rctx->digest_dma_addr;
 395
 396	return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
 397
 398free_rctx_digest:
 399	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 400			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 401end:
 402	return rc;
 403}
 404
 405static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
 406{
 407	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 408	struct ahash_request *req = hash_engine->req;
 409	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 410	int rc = 0;
 411
 412	AHASH_DBG(hace_dev, "\n");
 413
 414	aspeed_ahash_fill_padding(hace_dev, rctx);
 415
 416	rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
 417					       rctx->digest,
 418					       SHA512_DIGEST_SIZE,
 419					       DMA_BIDIRECTIONAL);
 420	if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
 421		dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
 422		rc = -ENOMEM;
 423		goto end;
 424	}
 425
 426	rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
 427					       rctx->buffer,
 428					       rctx->block_size * 2,
 429					       DMA_TO_DEVICE);
 430	if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
 431		dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
 432		rc = -ENOMEM;
 433		goto free_rctx_digest;
 434	}
 435
 436	hash_engine->src_dma = rctx->buffer_dma_addr;
 437	hash_engine->src_length = rctx->bufcnt;
 438	hash_engine->digest_dma = rctx->digest_dma_addr;
 439
 440	if (rctx->flags & SHA_FLAGS_HMAC)
 441		return aspeed_hace_ahash_trigger(hace_dev,
 442						 aspeed_ahash_hmac_resume);
 443
 444	return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
 445
 446free_rctx_digest:
 447	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 448			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 449end:
 450	return rc;
 451}
 452
 453static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
 454{
 455	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 456	struct ahash_request *req = hash_engine->req;
 457	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 458
 459	AHASH_DBG(hace_dev, "\n");
 460
 461	dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
 462		     DMA_TO_DEVICE);
 463
 464	if (rctx->bufcnt != 0)
 465		dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
 466				 rctx->block_size * 2,
 467				 DMA_TO_DEVICE);
 468
 469	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 470			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 471
 472	scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
 473				 rctx->total - rctx->offset, 0);
 474
 475	rctx->bufcnt = rctx->total - rctx->offset;
 476	rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL;
 477
 478	if (rctx->flags & SHA_FLAGS_FINUP)
 479		return aspeed_ahash_req_final(hace_dev);
 480
 481	return aspeed_ahash_complete(hace_dev);
 482}
 483
 484static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
 485{
 486	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 487	struct ahash_request *req = hash_engine->req;
 488	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 489
 490	AHASH_DBG(hace_dev, "\n");
 491
 492	dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
 493			 SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
 494
 495	if (rctx->flags & SHA_FLAGS_FINUP)
 496		return aspeed_ahash_req_final(hace_dev);
 497
 498	return aspeed_ahash_complete(hace_dev);
 499}
 500
 501static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
 502{
 503	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
 504	struct ahash_request *req = hash_engine->req;
 505	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 506	aspeed_hace_fn_t resume;
 507	int ret;
 508
 509	AHASH_DBG(hace_dev, "\n");
 510
 511	if (hace_dev->version == AST2600_VERSION) {
 512		rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
 513		resume = aspeed_ahash_update_resume_sg;
 514
 515	} else {
 516		resume = aspeed_ahash_update_resume;
 517	}
 518
 519	ret = hash_engine->dma_prepare(hace_dev);
 520	if (ret)
 521		return ret;
 522
 523	return aspeed_hace_ahash_trigger(hace_dev, resume);
 524}
 525
 526static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev,
 527				  struct ahash_request *req)
 528{
 529	return crypto_transfer_hash_request_to_engine(
 530			hace_dev->crypt_engine_hash, req);
 531}
 532
 533static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq)
 534{
 535	struct ahash_request *req = ahash_request_cast(areq);
 536	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 537	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 538	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 539	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
 540	struct aspeed_engine_hash *hash_engine;
 541	int ret = 0;
 542
 543	hash_engine = &hace_dev->hash_engine;
 544	hash_engine->flags |= CRYPTO_FLAGS_BUSY;
 545
 546	if (rctx->op == SHA_OP_UPDATE)
 547		ret = aspeed_ahash_req_update(hace_dev);
 548	else if (rctx->op == SHA_OP_FINAL)
 549		ret = aspeed_ahash_req_final(hace_dev);
 550
 551	if (ret != -EINPROGRESS)
 552		return ret;
 553
 554	return 0;
 555}
 556
 557static void aspeed_ahash_prepare_request(struct crypto_engine *engine,
 558					 void *areq)
 559{
 560	struct ahash_request *req = ahash_request_cast(areq);
 561	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 562	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 563	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
 564	struct aspeed_engine_hash *hash_engine;
 565
 566	hash_engine = &hace_dev->hash_engine;
 567	hash_engine->req = req;
 568
 569	if (hace_dev->version == AST2600_VERSION)
 570		hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
 571	else
 572		hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
 573}
 574
 575static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq)
 576{
 577	aspeed_ahash_prepare_request(engine, areq);
 578	return aspeed_ahash_do_request(engine, areq);
 579}
 580
 581static int aspeed_sham_update(struct ahash_request *req)
 582{
 583	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 584	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 585	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 586	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
 587
 588	AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
 589
 590	rctx->total = req->nbytes;
 591	rctx->src_sg = req->src;
 592	rctx->offset = 0;
 593	rctx->src_nents = sg_nents(req->src);
 594	rctx->op = SHA_OP_UPDATE;
 595
 596	rctx->digcnt[0] += rctx->total;
 597	if (rctx->digcnt[0] < rctx->total)
 598		rctx->digcnt[1]++;
 599
 600	if (rctx->bufcnt + rctx->total < rctx->block_size) {
 601		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
 602					 rctx->src_sg, rctx->offset,
 603					 rctx->total, 0);
 604		rctx->bufcnt += rctx->total;
 605
 606		return 0;
 607	}
 608
 609	return aspeed_hace_hash_handle_queue(hace_dev, req);
 610}
 611
 612static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
 613				    const u8 *data, unsigned int len, u8 *out)
 614{
 615	SHASH_DESC_ON_STACK(shash, tfm);
 616
 617	shash->tfm = tfm;
 618
 619	return crypto_shash_digest(shash, data, len, out);
 620}
 621
 622static int aspeed_sham_final(struct ahash_request *req)
 623{
 624	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 625	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 626	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 627	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
 628
 629	AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n",
 630		  req->nbytes, rctx->total);
 631	rctx->op = SHA_OP_FINAL;
 632
 633	return aspeed_hace_hash_handle_queue(hace_dev, req);
 634}
 635
 636static int aspeed_sham_finup(struct ahash_request *req)
 637{
 638	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 639	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 640	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 641	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
 642	int rc1, rc2;
 643
 644	AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
 645
 646	rctx->flags |= SHA_FLAGS_FINUP;
 647
 648	rc1 = aspeed_sham_update(req);
 649	if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
 650		return rc1;
 651
  652	/*
  653	 * final() must always be called to clean up resources,
  654	 * even if update() failed, except on -EINPROGRESS.
  655	 */
 656	rc2 = aspeed_sham_final(req);
 657
 658	return rc1 ? : rc2;
 659}
 660
 661static int aspeed_sham_init(struct ahash_request *req)
 662{
 663	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 664	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 665	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 666	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
 667	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
 668
 669	AHASH_DBG(hace_dev, "%s: digest size:%d\n",
 670		  crypto_tfm_alg_name(&tfm->base),
 671		  crypto_ahash_digestsize(tfm));
 672
 673	rctx->cmd = HASH_CMD_ACC_MODE;
 674	rctx->flags = 0;
 675
 676	switch (crypto_ahash_digestsize(tfm)) {
 677	case SHA1_DIGEST_SIZE:
 678		rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP;
 679		rctx->flags |= SHA_FLAGS_SHA1;
 680		rctx->digsize = SHA1_DIGEST_SIZE;
 681		rctx->block_size = SHA1_BLOCK_SIZE;
 682		rctx->sha_iv = sha1_iv;
 683		rctx->ivsize = 32;
 684		memcpy(rctx->digest, sha1_iv, rctx->ivsize);
 685		break;
 686	case SHA224_DIGEST_SIZE:
 687		rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
 688		rctx->flags |= SHA_FLAGS_SHA224;
 689		rctx->digsize = SHA224_DIGEST_SIZE;
 690		rctx->block_size = SHA224_BLOCK_SIZE;
 691		rctx->sha_iv = sha224_iv;
 692		rctx->ivsize = 32;
 693		memcpy(rctx->digest, sha224_iv, rctx->ivsize);
 694		break;
 695	case SHA256_DIGEST_SIZE:
 696		rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
 697		rctx->flags |= SHA_FLAGS_SHA256;
 698		rctx->digsize = SHA256_DIGEST_SIZE;
 699		rctx->block_size = SHA256_BLOCK_SIZE;
 700		rctx->sha_iv = sha256_iv;
 701		rctx->ivsize = 32;
 702		memcpy(rctx->digest, sha256_iv, rctx->ivsize);
 703		break;
 704	case SHA384_DIGEST_SIZE:
 705		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
 706			     HASH_CMD_SHA_SWAP;
 707		rctx->flags |= SHA_FLAGS_SHA384;
 708		rctx->digsize = SHA384_DIGEST_SIZE;
 709		rctx->block_size = SHA384_BLOCK_SIZE;
 710		rctx->sha_iv = (const __be32 *)sha384_iv;
 711		rctx->ivsize = 64;
 712		memcpy(rctx->digest, sha384_iv, rctx->ivsize);
 713		break;
 714	case SHA512_DIGEST_SIZE:
 715		rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
 716			     HASH_CMD_SHA_SWAP;
 717		rctx->flags |= SHA_FLAGS_SHA512;
 718		rctx->digsize = SHA512_DIGEST_SIZE;
 719		rctx->block_size = SHA512_BLOCK_SIZE;
 720		rctx->sha_iv = (const __be32 *)sha512_iv;
 721		rctx->ivsize = 64;
 722		memcpy(rctx->digest, sha512_iv, rctx->ivsize);
 723		break;
 724	default:
  725		dev_warn(tctx->hace_dev->dev, "digest size %d not supported\n",
 726			 crypto_ahash_digestsize(tfm));
 727		return -EINVAL;
 728	}
 729
 730	rctx->bufcnt = 0;
 731	rctx->total = 0;
 732	rctx->digcnt[0] = 0;
 733	rctx->digcnt[1] = 0;
 734
 735	/* HMAC init */
 736	if (tctx->flags & SHA_FLAGS_HMAC) {
 737		rctx->digcnt[0] = rctx->block_size;
 738		rctx->bufcnt = rctx->block_size;
 739		memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
 740		rctx->flags |= SHA_FLAGS_HMAC;
 741	}
 742
 743	return 0;
 744}
 745
 746static int aspeed_sham_digest(struct ahash_request *req)
 747{
 748	return aspeed_sham_init(req) ? : aspeed_sham_finup(req);
 749}
 750
 751static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
 752			      unsigned int keylen)
 753{
 754	struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 755	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
 756	struct aspeed_sha_hmac_ctx *bctx = tctx->base;
 757	int ds = crypto_shash_digestsize(bctx->shash);
 758	int bs = crypto_shash_blocksize(bctx->shash);
 759	int err = 0;
 760	int i;
 761
 762	AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base),
 763		  keylen);
 764
 765	if (keylen > bs) {
 766		err = aspeed_sham_shash_digest(bctx->shash,
 767					       crypto_shash_get_flags(bctx->shash),
 768					       key, keylen, bctx->ipad);
 769		if (err)
 770			return err;
 771		keylen = ds;
 772
 773	} else {
 774		memcpy(bctx->ipad, key, keylen);
 775	}
 776
 777	memset(bctx->ipad + keylen, 0, bs - keylen);
 778	memcpy(bctx->opad, bctx->ipad, bs);
 779
 780	for (i = 0; i < bs; i++) {
 781		bctx->ipad[i] ^= HMAC_IPAD_VALUE;
 782		bctx->opad[i] ^= HMAC_OPAD_VALUE;
 783	}
 784
 785	return err;
 786}
 787
 788static int aspeed_sham_cra_init(struct crypto_tfm *tfm)
 789{
 790	struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
 791	struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
 792	struct aspeed_hace_alg *ast_alg;
 793
 794	ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base);
 795	tctx->hace_dev = ast_alg->hace_dev;
 796	tctx->flags = 0;
 797
 798	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 799				 sizeof(struct aspeed_sham_reqctx));
 800
 801	if (ast_alg->alg_base) {
 802		/* hmac related */
 803		struct aspeed_sha_hmac_ctx *bctx = tctx->base;
 804
 805		tctx->flags |= SHA_FLAGS_HMAC;
 806		bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0,
 807						 CRYPTO_ALG_NEED_FALLBACK);
 808		if (IS_ERR(bctx->shash)) {
 809			dev_warn(ast_alg->hace_dev->dev,
 810				 "base driver '%s' could not be loaded.\n",
 811				 ast_alg->alg_base);
 812			return PTR_ERR(bctx->shash);
 813		}
 814	}
 815
 816	return 0;
 817}
 818
 819static void aspeed_sham_cra_exit(struct crypto_tfm *tfm)
 820{
 821	struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm);
 822	struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
 823
 824	AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm));
 825
 826	if (tctx->flags & SHA_FLAGS_HMAC) {
 827		struct aspeed_sha_hmac_ctx *bctx = tctx->base;
 828
 829		crypto_free_shash(bctx->shash);
 830	}
 831}
 832
 833static int aspeed_sham_export(struct ahash_request *req, void *out)
 834{
 835	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 836
 837	memcpy(out, rctx, sizeof(*rctx));
 838
 839	return 0;
 840}
 841
 842static int aspeed_sham_import(struct ahash_request *req, const void *in)
 843{
 844	struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
 845
 846	memcpy(rctx, in, sizeof(*rctx));
 847
 848	return 0;
 849}
 850
 851static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 852	{
 853		.alg.ahash.base = {
 854			.init	= aspeed_sham_init,
 855			.update	= aspeed_sham_update,
 856			.final	= aspeed_sham_final,
 857			.finup	= aspeed_sham_finup,
 858			.digest	= aspeed_sham_digest,
 859			.export	= aspeed_sham_export,
 860			.import	= aspeed_sham_import,
 861			.halg = {
 862				.digestsize = SHA1_DIGEST_SIZE,
 863				.statesize = sizeof(struct aspeed_sham_reqctx),
 864				.base = {
 865					.cra_name		= "sha1",
 866					.cra_driver_name	= "aspeed-sha1",
 867					.cra_priority		= 300,
 868					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 869								  CRYPTO_ALG_ASYNC |
 870								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 871					.cra_blocksize		= SHA1_BLOCK_SIZE,
 872					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
 873					.cra_alignmask		= 0,
 874					.cra_module		= THIS_MODULE,
 875					.cra_init		= aspeed_sham_cra_init,
 876					.cra_exit		= aspeed_sham_cra_exit,
 877				}
 878			}
 879		},
 880		.alg.ahash.op = {
 881			.do_one_request = aspeed_ahash_do_one,
 882		},
 883	},
 884	{
 885		.alg.ahash.base = {
 886			.init	= aspeed_sham_init,
 887			.update	= aspeed_sham_update,
 888			.final	= aspeed_sham_final,
 889			.finup	= aspeed_sham_finup,
 890			.digest	= aspeed_sham_digest,
 891			.export	= aspeed_sham_export,
 892			.import	= aspeed_sham_import,
 893			.halg = {
 894				.digestsize = SHA256_DIGEST_SIZE,
 895				.statesize = sizeof(struct aspeed_sham_reqctx),
 896				.base = {
 897					.cra_name		= "sha256",
 898					.cra_driver_name	= "aspeed-sha256",
 899					.cra_priority		= 300,
 900					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 901								  CRYPTO_ALG_ASYNC |
 902								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 903					.cra_blocksize		= SHA256_BLOCK_SIZE,
 904					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
 905					.cra_alignmask		= 0,
 906					.cra_module		= THIS_MODULE,
 907					.cra_init		= aspeed_sham_cra_init,
 908					.cra_exit		= aspeed_sham_cra_exit,
 909				}
 910			}
 911		},
 912		.alg.ahash.op = {
 913			.do_one_request = aspeed_ahash_do_one,
 914		},
 915	},
 916	{
 917		.alg.ahash.base = {
 918			.init	= aspeed_sham_init,
 919			.update	= aspeed_sham_update,
 920			.final	= aspeed_sham_final,
 921			.finup	= aspeed_sham_finup,
 922			.digest	= aspeed_sham_digest,
 923			.export	= aspeed_sham_export,
 924			.import	= aspeed_sham_import,
 925			.halg = {
 926				.digestsize = SHA224_DIGEST_SIZE,
 927				.statesize = sizeof(struct aspeed_sham_reqctx),
 928				.base = {
 929					.cra_name		= "sha224",
 930					.cra_driver_name	= "aspeed-sha224",
 931					.cra_priority		= 300,
 932					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 933								  CRYPTO_ALG_ASYNC |
 934								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 935					.cra_blocksize		= SHA224_BLOCK_SIZE,
 936					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
 937					.cra_alignmask		= 0,
 938					.cra_module		= THIS_MODULE,
 939					.cra_init		= aspeed_sham_cra_init,
 940					.cra_exit		= aspeed_sham_cra_exit,
 941				}
 942			}
 943		},
 944		.alg.ahash.op = {
 945			.do_one_request = aspeed_ahash_do_one,
 946		},
 947	},
 948	{
 949		.alg_base = "sha1",
 950		.alg.ahash.base = {
 951			.init	= aspeed_sham_init,
 952			.update	= aspeed_sham_update,
 953			.final	= aspeed_sham_final,
 954			.finup	= aspeed_sham_finup,
 955			.digest	= aspeed_sham_digest,
 956			.setkey	= aspeed_sham_setkey,
 957			.export	= aspeed_sham_export,
 958			.import	= aspeed_sham_import,
 959			.halg = {
 960				.digestsize = SHA1_DIGEST_SIZE,
 961				.statesize = sizeof(struct aspeed_sham_reqctx),
 962				.base = {
 963					.cra_name		= "hmac(sha1)",
 964					.cra_driver_name	= "aspeed-hmac-sha1",
 965					.cra_priority		= 300,
 966					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 967								  CRYPTO_ALG_ASYNC |
 968								  CRYPTO_ALG_KERN_DRIVER_ONLY,
 969					.cra_blocksize		= SHA1_BLOCK_SIZE,
 970					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
 971								sizeof(struct aspeed_sha_hmac_ctx),
 972					.cra_alignmask		= 0,
 973					.cra_module		= THIS_MODULE,
 974					.cra_init		= aspeed_sham_cra_init,
 975					.cra_exit		= aspeed_sham_cra_exit,
 976				}
 977			}
 978		},
 979		.alg.ahash.op = {
 980			.do_one_request = aspeed_ahash_do_one,
 981		},
 982	},
 983	{
 984		.alg_base = "sha224",
 985		.alg.ahash.base = {
 986			.init	= aspeed_sham_init,
 987			.update	= aspeed_sham_update,
 988			.final	= aspeed_sham_final,
 989			.finup	= aspeed_sham_finup,
 990			.digest	= aspeed_sham_digest,
 991			.setkey	= aspeed_sham_setkey,
 992			.export	= aspeed_sham_export,
 993			.import	= aspeed_sham_import,
 994			.halg = {
 995				.digestsize = SHA224_DIGEST_SIZE,
 996				.statesize = sizeof(struct aspeed_sham_reqctx),
 997				.base = {
 998					.cra_name		= "hmac(sha224)",
 999					.cra_driver_name	= "aspeed-hmac-sha224",
1000					.cra_priority		= 300,
1001					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1002								  CRYPTO_ALG_ASYNC |
1003								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1004					.cra_blocksize		= SHA224_BLOCK_SIZE,
1005					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1006								sizeof(struct aspeed_sha_hmac_ctx),
1007					.cra_alignmask		= 0,
1008					.cra_module		= THIS_MODULE,
1009					.cra_init		= aspeed_sham_cra_init,
1010					.cra_exit		= aspeed_sham_cra_exit,
1011				}
1012			}
1013		},
1014		.alg.ahash.op = {
1015			.do_one_request = aspeed_ahash_do_one,
1016		},
1017	},
1018	{
1019		.alg_base = "sha256",
1020		.alg.ahash.base = {
1021			.init	= aspeed_sham_init,
1022			.update	= aspeed_sham_update,
1023			.final	= aspeed_sham_final,
1024			.finup	= aspeed_sham_finup,
1025			.digest	= aspeed_sham_digest,
1026			.setkey	= aspeed_sham_setkey,
1027			.export	= aspeed_sham_export,
1028			.import	= aspeed_sham_import,
1029			.halg = {
1030				.digestsize = SHA256_DIGEST_SIZE,
1031				.statesize = sizeof(struct aspeed_sham_reqctx),
1032				.base = {
1033					.cra_name		= "hmac(sha256)",
1034					.cra_driver_name	= "aspeed-hmac-sha256",
1035					.cra_priority		= 300,
1036					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1037								  CRYPTO_ALG_ASYNC |
1038								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1039					.cra_blocksize		= SHA256_BLOCK_SIZE,
1040					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1041								sizeof(struct aspeed_sha_hmac_ctx),
1042					.cra_alignmask		= 0,
1043					.cra_module		= THIS_MODULE,
1044					.cra_init		= aspeed_sham_cra_init,
1045					.cra_exit		= aspeed_sham_cra_exit,
1046				}
1047			}
1048		},
1049		.alg.ahash.op = {
1050			.do_one_request = aspeed_ahash_do_one,
1051		},
1052	},
1053};
1054
1055static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
1056	{
1057		.alg.ahash.base = {
1058			.init	= aspeed_sham_init,
1059			.update	= aspeed_sham_update,
1060			.final	= aspeed_sham_final,
1061			.finup	= aspeed_sham_finup,
1062			.digest	= aspeed_sham_digest,
1063			.export	= aspeed_sham_export,
1064			.import	= aspeed_sham_import,
1065			.halg = {
1066				.digestsize = SHA384_DIGEST_SIZE,
1067				.statesize = sizeof(struct aspeed_sham_reqctx),
1068				.base = {
1069					.cra_name		= "sha384",
1070					.cra_driver_name	= "aspeed-sha384",
1071					.cra_priority		= 300,
1072					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1073								  CRYPTO_ALG_ASYNC |
1074								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1075					.cra_blocksize		= SHA384_BLOCK_SIZE,
1076					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
1077					.cra_alignmask		= 0,
1078					.cra_module		= THIS_MODULE,
1079					.cra_init		= aspeed_sham_cra_init,
1080					.cra_exit		= aspeed_sham_cra_exit,
1081				}
1082			}
1083		},
1084		.alg.ahash.op = {
1085			.do_one_request = aspeed_ahash_do_one,
1086		},
1087	},
1088	{
1089		.alg.ahash.base = {
1090			.init	= aspeed_sham_init,
1091			.update	= aspeed_sham_update,
1092			.final	= aspeed_sham_final,
1093			.finup	= aspeed_sham_finup,
1094			.digest	= aspeed_sham_digest,
1095			.export	= aspeed_sham_export,
1096			.import	= aspeed_sham_import,
1097			.halg = {
1098				.digestsize = SHA512_DIGEST_SIZE,
1099				.statesize = sizeof(struct aspeed_sham_reqctx),
1100				.base = {
1101					.cra_name		= "sha512",
1102					.cra_driver_name	= "aspeed-sha512",
1103					.cra_priority		= 300,
1104					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1105								  CRYPTO_ALG_ASYNC |
1106								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1107					.cra_blocksize		= SHA512_BLOCK_SIZE,
1108					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx),
1109					.cra_alignmask		= 0,
1110					.cra_module		= THIS_MODULE,
1111					.cra_init		= aspeed_sham_cra_init,
1112					.cra_exit		= aspeed_sham_cra_exit,
1113				}
1114			}
1115		},
1116		.alg.ahash.op = {
1117			.do_one_request = aspeed_ahash_do_one,
1118		},
1119	},
1120	{
1121		.alg_base = "sha384",
1122		.alg.ahash.base = {
1123			.init	= aspeed_sham_init,
1124			.update	= aspeed_sham_update,
1125			.final	= aspeed_sham_final,
1126			.finup	= aspeed_sham_finup,
1127			.digest	= aspeed_sham_digest,
1128			.setkey	= aspeed_sham_setkey,
1129			.export	= aspeed_sham_export,
1130			.import	= aspeed_sham_import,
1131			.halg = {
1132				.digestsize = SHA384_DIGEST_SIZE,
1133				.statesize = sizeof(struct aspeed_sham_reqctx),
1134				.base = {
1135					.cra_name		= "hmac(sha384)",
1136					.cra_driver_name	= "aspeed-hmac-sha384",
1137					.cra_priority		= 300,
1138					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1139								  CRYPTO_ALG_ASYNC |
1140								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1141					.cra_blocksize		= SHA384_BLOCK_SIZE,
1142					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1143								sizeof(struct aspeed_sha_hmac_ctx),
1144					.cra_alignmask		= 0,
1145					.cra_module		= THIS_MODULE,
1146					.cra_init		= aspeed_sham_cra_init,
1147					.cra_exit		= aspeed_sham_cra_exit,
1148				}
1149			}
1150		},
1151		.alg.ahash.op = {
1152			.do_one_request = aspeed_ahash_do_one,
1153		},
1154	},
1155	{
1156		.alg_base = "sha512",
1157		.alg.ahash.base = {
1158			.init	= aspeed_sham_init,
1159			.update	= aspeed_sham_update,
1160			.final	= aspeed_sham_final,
1161			.finup	= aspeed_sham_finup,
1162			.digest	= aspeed_sham_digest,
1163			.setkey	= aspeed_sham_setkey,
1164			.export	= aspeed_sham_export,
1165			.import	= aspeed_sham_import,
1166			.halg = {
1167				.digestsize = SHA512_DIGEST_SIZE,
1168				.statesize = sizeof(struct aspeed_sham_reqctx),
1169				.base = {
1170					.cra_name		= "hmac(sha512)",
1171					.cra_driver_name	= "aspeed-hmac-sha512",
1172					.cra_priority		= 300,
1173					.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1174								  CRYPTO_ALG_ASYNC |
1175								  CRYPTO_ALG_KERN_DRIVER_ONLY,
1176					.cra_blocksize		= SHA512_BLOCK_SIZE,
1177					.cra_ctxsize		= sizeof(struct aspeed_sham_ctx) +
1178								sizeof(struct aspeed_sha_hmac_ctx),
1179					.cra_alignmask		= 0,
1180					.cra_module		= THIS_MODULE,
1181					.cra_init		= aspeed_sham_cra_init,
1182					.cra_exit		= aspeed_sham_cra_exit,
1183				}
1184			}
1185		},
1186		.alg.ahash.op = {
1187			.do_one_request = aspeed_ahash_do_one,
1188		},
1189	},
1190};
1191
1192void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
1193{
1194	int i;
1195
1196	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
1197		crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
1198
1199	if (hace_dev->version != AST2600_VERSION)
1200		return;
1201
1202	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
1203		crypto_engine_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
1204}
1205
1206void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
1207{
1208	int rc, i;
1209
1210	AHASH_DBG(hace_dev, "\n");
1211
1212	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) {
1213		aspeed_ahash_algs[i].hace_dev = hace_dev;
1214		rc = crypto_engine_register_ahash(&aspeed_ahash_algs[i].alg.ahash);
1215		if (rc) {
1216			AHASH_DBG(hace_dev, "Failed to register %s\n",
1217				  aspeed_ahash_algs[i].alg.ahash.base.halg.base.cra_name);
1218		}
1219	}
1220
1221	if (hace_dev->version != AST2600_VERSION)
1222		return;
1223
1224	for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
1225		aspeed_ahash_algs_g6[i].hace_dev = hace_dev;
1226		rc = crypto_engine_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash);
1227		if (rc) {
1228			AHASH_DBG(hace_dev, "Failed to register %s\n",
1229				  aspeed_ahash_algs_g6[i].alg.ahash.base.halg.base.cra_name);
1230		}
1231	}
1232}
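/*
 * A minimal usage sketch, not part of this driver: once these algorithms
 * are registered, a kernel user reaches them through the generic ahash
 * API by cra_name, and the crypto core picks this driver or a software
 * implementation based on priority.  Assuming a prepared scatterlist
 * "sg" of "nbytes" bytes and a "digest" output buffer:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, priv);
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	err = crypto_ahash_digest(req);    (may return -EINPROGRESS)
 *
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 *
 * Error handling and completion waiting (e.g. crypto_wait_req()) are
 * omitted; "done_cb" and "priv" are placeholder names.
 */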