v5.9 (drivers/crypto/stm32/stm32-hash.c)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * This file is part of STM32 Crypto driver for Linux.
   4 *
   5 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
   6 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
   7 */
   8
   9#include <linux/clk.h>
  10#include <linux/crypto.h>
  11#include <linux/delay.h>
  12#include <linux/dmaengine.h>
  13#include <linux/interrupt.h>
  14#include <linux/io.h>
  15#include <linux/iopoll.h>
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/of_device.h>
  19#include <linux/platform_device.h>
  20#include <linux/pm_runtime.h>
  21#include <linux/reset.h>
  22
  23#include <crypto/engine.h>
  24#include <crypto/hash.h>
  25#include <crypto/md5.h>
  26#include <crypto/scatterwalk.h>
  27#include <crypto/sha.h>
  28#include <crypto/internal/hash.h>
  29
  30#define HASH_CR				0x00
  31#define HASH_DIN			0x04
  32#define HASH_STR			0x08
  33#define HASH_IMR			0x20
  34#define HASH_SR				0x24
  35#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
  36#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
  37#define HASH_HWCFGR			0x3F0
  38#define HASH_VER			0x3F4
  39#define HASH_ID				0x3F8
  40
  41/* Control Register */
  42#define HASH_CR_INIT			BIT(2)
  43#define HASH_CR_DMAE			BIT(3)
  44#define HASH_CR_DATATYPE_POS		4
  45#define HASH_CR_MODE			BIT(6)
  46#define HASH_CR_MDMAT			BIT(13)
  47#define HASH_CR_DMAA			BIT(14)
  48#define HASH_CR_LKEY			BIT(16)
  49
  50#define HASH_CR_ALGO_SHA1		0x0
  51#define HASH_CR_ALGO_MD5		0x80
  52#define HASH_CR_ALGO_SHA224		0x40000
  53#define HASH_CR_ALGO_SHA256		0x40080
  54
  55/* Interrupt */
  56#define HASH_DINIE			BIT(0)
  57#define HASH_DCIE			BIT(1)
  58
  59/* Interrupt Mask */
  60#define HASH_MASK_CALC_COMPLETION	BIT(0)
  61#define HASH_MASK_DATA_INPUT		BIT(1)
  62
  63/* Context swap register */
  64#define HASH_CSR_REGISTER_NUMBER	53
  65
  66/* Status Flags */
  67#define HASH_SR_DATA_INPUT_READY	BIT(0)
  68#define HASH_SR_OUTPUT_READY		BIT(1)
  69#define HASH_SR_DMA_ACTIVE		BIT(2)
  70#define HASH_SR_BUSY			BIT(3)
  71
  72/* STR Register */
  73#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
  74#define HASH_STR_DCAL			BIT(8)
  75
  76#define HASH_FLAGS_INIT			BIT(0)
  77#define HASH_FLAGS_OUTPUT_READY		BIT(1)
  78#define HASH_FLAGS_CPU			BIT(2)
  79#define HASH_FLAGS_DMA_READY		BIT(3)
  80#define HASH_FLAGS_DMA_ACTIVE		BIT(4)
  81#define HASH_FLAGS_HMAC_INIT		BIT(5)
  82#define HASH_FLAGS_HMAC_FINAL		BIT(6)
  83#define HASH_FLAGS_HMAC_KEY		BIT(7)
  84
  85#define HASH_FLAGS_FINAL		BIT(15)
  86#define HASH_FLAGS_FINUP		BIT(16)
  87#define HASH_FLAGS_ALGO_MASK		GENMASK(21, 18)
  88#define HASH_FLAGS_MD5			BIT(18)
  89#define HASH_FLAGS_SHA1			BIT(19)
  90#define HASH_FLAGS_SHA224		BIT(20)
  91#define HASH_FLAGS_SHA256		BIT(21)
  92#define HASH_FLAGS_ERRORS		BIT(22)
  93#define HASH_FLAGS_HMAC			BIT(23)
  94
  95#define HASH_OP_UPDATE			1
  96#define HASH_OP_FINAL			2
  97
  98enum stm32_hash_data_format {
  99	HASH_DATA_32_BITS		= 0x0,
 100	HASH_DATA_16_BITS		= 0x1,
 101	HASH_DATA_8_BITS		= 0x2,
 102	HASH_DATA_1_BIT			= 0x3
 103};
 104
 105#define HASH_BUFLEN			256
 106#define HASH_LONG_KEY			64
 107#define HASH_MAX_KEY_SIZE		(SHA256_BLOCK_SIZE * 8)
 108#define HASH_QUEUE_LENGTH		16
 109#define HASH_DMA_THRESHOLD		50
 110
 111#define HASH_AUTOSUSPEND_DELAY		50
 112
 113struct stm32_hash_ctx {
 114	struct crypto_engine_ctx enginectx;
 115	struct stm32_hash_dev	*hdev;
 116	unsigned long		flags;
 117
 118	u8			key[HASH_MAX_KEY_SIZE];
 119	int			keylen;
 120};
 121
 122struct stm32_hash_request_ctx {
 123	struct stm32_hash_dev	*hdev;
 124	unsigned long		flags;
 125	unsigned long		op;
 126
 127	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
 128	size_t			digcnt;
 129	size_t			bufcnt;
 130	size_t			buflen;
 131
 132	/* DMA */
 133	struct scatterlist	*sg;
 134	unsigned int		offset;
 135	unsigned int		total;
 136	struct scatterlist	sg_key;
 137
 138	dma_addr_t		dma_addr;
 139	size_t			dma_ct;
 140	int			nents;
 141
 142	u8			data_type;
 143
 144	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
 145
 146	/* Export Context */
 147	u32			*hw_context;
 148};
 149
 150struct stm32_hash_algs_info {
 151	struct ahash_alg	*algs_list;
 152	size_t			size;
 153};
 154
 155struct stm32_hash_pdata {
 156	struct stm32_hash_algs_info	*algs_info;
 157	size_t				algs_info_size;
 158};
 159
 160struct stm32_hash_dev {
 161	struct list_head	list;
 162	struct device		*dev;
 163	struct clk		*clk;
 164	struct reset_control	*rst;
 165	void __iomem		*io_base;
 166	phys_addr_t		phys_base;
 167	u32			dma_mode;
 168	u32			dma_maxburst;
 169
 170	struct ahash_request	*req;
 171	struct crypto_engine	*engine;
 172
 173	int			err;
 174	unsigned long		flags;
 175
 176	struct dma_chan		*dma_lch;
 177	struct completion	dma_completion;
 178
 179	const struct stm32_hash_pdata	*pdata;
 180};
 181
 182struct stm32_hash_drv {
 183	struct list_head	dev_list;
 184	spinlock_t		lock; /* List protection access */
 185};
 186
 187static struct stm32_hash_drv stm32_hash = {
 188	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
 189	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
 190};
 191
 192static void stm32_hash_dma_callback(void *param);
 193
 194static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
 195{
 196	return readl_relaxed(hdev->io_base + offset);
 197}
 198
 199static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
 200				    u32 offset, u32 value)
 201{
 202	writel_relaxed(value, hdev->io_base + offset);
 203}
 204
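/*
 * stm32_hash_wait_busy() - poll SR until the BUSY flag clears, with a 10 us
 * poll interval and a 10 ms timeout, returning -ETIMEDOUT on expiry.
 */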
 205static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
 206{
 207	u32 status;
 208
 209	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
 210				   !(status & HASH_SR_BUSY), 10, 10000);
 211}
 212
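/*
 * stm32_hash_set_nblw() - program STR.NBLW with the number of valid bits in
 * the last 32-bit word written to DIN (8 bits per remaining byte).
 */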
 213static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
 214{
 215	u32 reg;
 216
 217	reg = stm32_hash_read(hdev, HASH_STR);
 218	reg &= ~(HASH_STR_NBLW_MASK);
 219	reg |= (8U * ((length) % 4U));
 220	stm32_hash_write(hdev, HASH_STR, reg);
 221}
 222
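/*
 * stm32_hash_write_key() - feed the HMAC key to DIN one word at a time, then
 * set STR.DCAL to digest it. Returns -EINPROGRESS while the core is
 * processing the key, or 0 when there is no key to write.
 */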
 223static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
 224{
 225	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
 226	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 227	u32 reg;
 228	int keylen = ctx->keylen;
 229	void *key = ctx->key;
 230
 231	if (keylen) {
 232		stm32_hash_set_nblw(hdev, keylen);
 233
 234		while (keylen > 0) {
 235			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
 236			keylen -= 4;
 237			key += 4;
 238		}
 239
 240		reg = stm32_hash_read(hdev, HASH_STR);
 241		reg |= HASH_STR_DCAL;
 242		stm32_hash_write(hdev, HASH_STR, reg);
 243
 244		return -EINPROGRESS;
 245	}
 246
 247	return 0;
 248}
 249
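/*
 * stm32_hash_write_ctrl() - one-time CR setup for a request (algorithm,
 * data type, HMAC mode and long-key selection), done only while
 * HASH_FLAGS_INIT is still clear.
 */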
 250static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
 251{
 252	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 253	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
 254	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 255
 256	u32 reg = HASH_CR_INIT;
 257
 258	if (!(hdev->flags & HASH_FLAGS_INIT)) {
 259		switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
 260		case HASH_FLAGS_MD5:
 261			reg |= HASH_CR_ALGO_MD5;
 262			break;
 263		case HASH_FLAGS_SHA1:
 264			reg |= HASH_CR_ALGO_SHA1;
 265			break;
 266		case HASH_FLAGS_SHA224:
 267			reg |= HASH_CR_ALGO_SHA224;
 268			break;
 269		case HASH_FLAGS_SHA256:
 270			reg |= HASH_CR_ALGO_SHA256;
 271			break;
 272		default:
 273			reg |= HASH_CR_ALGO_MD5;
 274		}
 275
 276		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
 277
 278		if (rctx->flags & HASH_FLAGS_HMAC) {
 279			hdev->flags |= HASH_FLAGS_HMAC;
 280			reg |= HASH_CR_MODE;
 281			if (ctx->keylen > HASH_LONG_KEY)
 282				reg |= HASH_CR_LKEY;
 283		}
 284
 285		stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
 286
 287		stm32_hash_write(hdev, HASH_CR, reg);
 288
 289		hdev->flags |= HASH_FLAGS_INIT;
 290
 291		dev_dbg(hdev->dev, "Write Control %x\n", reg);
 292	}
 293}
 294
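/*
 * stm32_hash_append_sg() - copy bytes from the request scatterlist into the
 * internal buffer until the buffer is full or the data is exhausted,
 * advancing rctx->sg, rctx->offset and rctx->total along the way.
 */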
 295static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
 296{
 297	size_t count;
 298
 299	while ((rctx->bufcnt < rctx->buflen) && rctx->total) {
 300		count = min(rctx->sg->length - rctx->offset, rctx->total);
 301		count = min(count, rctx->buflen - rctx->bufcnt);
 302
 303		if (count <= 0) {
 304			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
 305				rctx->sg = sg_next(rctx->sg);
 306				continue;
 307			} else {
 308				break;
 309			}
 310		}
 311
 312		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->sg,
 313					 rctx->offset, count, 0);
 314
 315		rctx->bufcnt += count;
 316		rctx->offset += count;
 317		rctx->total -= count;
 318
 319		if (rctx->offset == rctx->sg->length) {
 320			rctx->sg = sg_next(rctx->sg);
 321			if (rctx->sg)
 322				rctx->offset = 0;
 323			else
 324				rctx->total = 0;
 325		}
 326	}
 327}
 328
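/*
 * stm32_hash_xmit_cpu() - push a buffer to the DIN FIFO word by word. On a
 * final transfer, program the valid-bit count and set DCAL to start the
 * digest; for HMAC the outer key is then written again.
 */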
 329static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
 330			       const u8 *buf, size_t length, int final)
 331{
 332	unsigned int count, len32;
 333	const u32 *buffer = (const u32 *)buf;
 334	u32 reg;
 335
 336	if (final)
 337		hdev->flags |= HASH_FLAGS_FINAL;
 338
 339	len32 = DIV_ROUND_UP(length, sizeof(u32));
 340
 341	dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n",
 342		__func__, length, final, len32);
 343
 344	hdev->flags |= HASH_FLAGS_CPU;
 345
 346	stm32_hash_write_ctrl(hdev);
 347
 348	if (stm32_hash_wait_busy(hdev))
 349		return -ETIMEDOUT;
 350
 351	if ((hdev->flags & HASH_FLAGS_HMAC) &&
 352	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
 353		hdev->flags |= HASH_FLAGS_HMAC_KEY;
 354		stm32_hash_write_key(hdev);
 355		if (stm32_hash_wait_busy(hdev))
 356			return -ETIMEDOUT;
 357	}
 358
 359	for (count = 0; count < len32; count++)
 360		stm32_hash_write(hdev, HASH_DIN, buffer[count]);
 361
 362	if (final) {
 363		stm32_hash_set_nblw(hdev, length);
 364		reg = stm32_hash_read(hdev, HASH_STR);
 365		reg |= HASH_STR_DCAL;
 366		stm32_hash_write(hdev, HASH_STR, reg);
 367		if (hdev->flags & HASH_FLAGS_HMAC) {
 368			if (stm32_hash_wait_busy(hdev))
 369				return -ETIMEDOUT;
 370			stm32_hash_write_key(hdev);
 371		}
 372		return -EINPROGRESS;
 373	}
 374
 375	return 0;
 376}
 377
 378static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
 379{
 380	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 381	int bufcnt, err = 0, final;
 382
 383	dev_dbg(hdev->dev, "%s flags %lx\n", __func__, rctx->flags);
 384
 385	final = (rctx->flags & HASH_FLAGS_FINUP);
 386
 387	while ((rctx->total >= rctx->buflen) ||
 388	       (rctx->bufcnt + rctx->total >= rctx->buflen)) {
 389		stm32_hash_append_sg(rctx);
 390		bufcnt = rctx->bufcnt;
 391		rctx->bufcnt = 0;
 392		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 0);
 393	}
 394
 395	stm32_hash_append_sg(rctx);
 396
 397	if (final) {
 398		bufcnt = rctx->bufcnt;
 399		rctx->bufcnt = 0;
 400		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
 401					  (rctx->flags & HASH_FLAGS_FINUP));
 402	}
 403
 404	return err;
 405}
 406
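/*
 * stm32_hash_xmit_dma() - stream one scatterlist entry to the FIFO on the
 * "in" DMA channel and wait up to 100 ms for completion; MDMAT stays set
 * while more transfers are expected for the same digest.
 */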
 407static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
 408			       struct scatterlist *sg, int length, int mdma)
 409{
 410	struct dma_async_tx_descriptor *in_desc;
 411	dma_cookie_t cookie;
 412	u32 reg;
 413	int err;
 414
 415	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
 416					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
 417					  DMA_CTRL_ACK);
 418	if (!in_desc) {
 419		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
 420		return -ENOMEM;
 421	}
 422
 423	reinit_completion(&hdev->dma_completion);
 424	in_desc->callback = stm32_hash_dma_callback;
 425	in_desc->callback_param = hdev;
 426
 427	hdev->flags |= HASH_FLAGS_FINAL;
 428	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
 429
 430	reg = stm32_hash_read(hdev, HASH_CR);
 431
 432	if (mdma)
 433		reg |= HASH_CR_MDMAT;
 434	else
 435		reg &= ~HASH_CR_MDMAT;
 436
 437	reg |= HASH_CR_DMAE;
 438
 439	stm32_hash_write(hdev, HASH_CR, reg);
 440
 441	stm32_hash_set_nblw(hdev, length);
 442
 443	cookie = dmaengine_submit(in_desc);
 444	err = dma_submit_error(cookie);
 445	if (err)
 446		return -ENOMEM;
 447
 448	dma_async_issue_pending(hdev->dma_lch);
 449
 450	if (!wait_for_completion_timeout(&hdev->dma_completion,
 451					 msecs_to_jiffies(100)))
 452		err = -ETIMEDOUT;
 453
 454	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
 455				     NULL, NULL) != DMA_COMPLETE)
 456		err = -ETIMEDOUT;
 457
 458	if (err) {
 459		dev_err(hdev->dev, "DMA Error %i\n", err);
 460		dmaengine_terminate_all(hdev->dma_lch);
 461		return err;
 462	}
 463
 464	return -EINPROGRESS;
 465}
 466
 467static void stm32_hash_dma_callback(void *param)
 468{
 469	struct stm32_hash_dev *hdev = param;
 470
 471	complete(&hdev->dma_completion);
 472
 473	hdev->flags |= HASH_FLAGS_DMA_READY;
 474}
 475
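/*
 * stm32_hash_hmac_dma_send() - send the HMAC key: by CPU writes for keys
 * below HASH_DMA_THRESHOLD bytes (or when hdev->dma_mode == 1), otherwise
 * by mapping the key buffer and reusing the data DMA path.
 */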
 476static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
 477{
 478	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 479	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
 480	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 481	int err;
 482
 483	if (ctx->keylen < HASH_DMA_THRESHOLD || (hdev->dma_mode == 1)) {
 484		err = stm32_hash_write_key(hdev);
 485		if (stm32_hash_wait_busy(hdev))
 486			return -ETIMEDOUT;
 487	} else {
 488		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
 489			sg_init_one(&rctx->sg_key, ctx->key,
 490				    ALIGN(ctx->keylen, sizeof(u32)));
 491
 492		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
 493					  DMA_TO_DEVICE);
 494		if (rctx->dma_ct == 0) {
 495			dev_err(hdev->dev, "dma_map_sg error\n");
 496			return -ENOMEM;
 497		}
 498
 499		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
 500
 501		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
 502	}
 503
 504	return err;
 505}
 506
 507static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
 508{
 509	struct dma_slave_config dma_conf;
 510	struct dma_chan *chan;
 511	int err;
 512
 513	memset(&dma_conf, 0, sizeof(dma_conf));
 514
 515	dma_conf.direction = DMA_MEM_TO_DEV;
 516	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
 517	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 518	dma_conf.src_maxburst = hdev->dma_maxburst;
 519	dma_conf.dst_maxburst = hdev->dma_maxburst;
 520	dma_conf.device_fc = false;
 521
 522	chan = dma_request_chan(hdev->dev, "in");
 523	if (IS_ERR(chan))
 524		return PTR_ERR(chan);
 525
 526	hdev->dma_lch = chan;
 527
 528	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
 529	if (err) {
 530		dma_release_channel(hdev->dma_lch);
 531		hdev->dma_lch = NULL;
 532		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
 533		return err;
 534	}
 535
 536	init_completion(&hdev->dma_completion);
 537
 538	return 0;
 539}
 540
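/*
 * stm32_hash_dma_send() - walk the request scatterlist and stream each entry
 * to the FIFO by DMA. The last entry needs care: with dma_mode == 1 its
 * unaligned tail is copied aside and written by CPU, otherwise its length is
 * rounded up to a word multiple. For HMAC, the key is sent before and after
 * the data.
 */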
 541static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 542{
 543	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 544	struct scatterlist sg[1], *tsg;
 545	int err = 0, len = 0, reg, ncp = 0;
 546	unsigned int i;
 547	u32 *buffer = (void *)rctx->buffer;
 548
 549	rctx->sg = hdev->req->src;
 550	rctx->total = hdev->req->nbytes;
 551
 552	rctx->nents = sg_nents(rctx->sg);
 553
 554	if (rctx->nents < 0)
 555		return -EINVAL;
 556
 557	stm32_hash_write_ctrl(hdev);
 558
 559	if (hdev->flags & HASH_FLAGS_HMAC) {
 560		err = stm32_hash_hmac_dma_send(hdev);
 561		if (err != -EINPROGRESS)
 562			return err;
 563	}
 564
 565	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
  566		sg[0] = *tsg;
  567		len = sg->length;
  568
 569		if (sg_is_last(sg)) {
 570			if (hdev->dma_mode == 1) {
 571				len = (ALIGN(sg->length, 16) - 16);
 572
 573				ncp = sg_pcopy_to_buffer(
 574					rctx->sg, rctx->nents,
 575					rctx->buffer, sg->length - len,
 576					rctx->total - sg->length + len);
 577
 578				sg->length = len;
 579			} else {
 580				if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
 581					len = sg->length;
 582					sg->length = ALIGN(sg->length,
 583							   sizeof(u32));
 585			}
 586		}
 587
 588		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
 589					  DMA_TO_DEVICE);
 590		if (rctx->dma_ct == 0) {
 591			dev_err(hdev->dev, "dma_map_sg error\n");
 592			return -ENOMEM;
 593		}
 594
 595		err = stm32_hash_xmit_dma(hdev, sg, len,
 596					  !sg_is_last(sg));
 597
 598		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 599
 600		if (err == -ENOMEM)
 601			return err;
 602	}
 603
 604	if (hdev->dma_mode == 1) {
 605		if (stm32_hash_wait_busy(hdev))
 606			return -ETIMEDOUT;
 607		reg = stm32_hash_read(hdev, HASH_CR);
 608		reg &= ~HASH_CR_DMAE;
 609		reg |= HASH_CR_DMAA;
 610		stm32_hash_write(hdev, HASH_CR, reg);
 611
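		/*
		 * Zero-pad the copied tail up to the next 32-bit boundary
		 * before writing it to the FIFO by CPU. (The byte count in
		 * the memset below is corrected here from the original
		 * "DIV_ROUND_UP(ncp, sizeof(u32)) - ncp", which subtracted
		 * a byte count from a word count.)
		 */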
 612		if (ncp) {
  613			memset(buffer + ncp, 0,
  614			       ALIGN(ncp, sizeof(u32)) - ncp);
 615			writesl(hdev->io_base + HASH_DIN, buffer,
 616				DIV_ROUND_UP(ncp, sizeof(u32)));
 617		}
 618		stm32_hash_set_nblw(hdev, ncp);
 619		reg = stm32_hash_read(hdev, HASH_STR);
 620		reg |= HASH_STR_DCAL;
 621		stm32_hash_write(hdev, HASH_STR, reg);
 622		err = -EINPROGRESS;
 623	}
 624
 625	if (hdev->flags & HASH_FLAGS_HMAC) {
 626		if (stm32_hash_wait_busy(hdev))
 627			return -ETIMEDOUT;
 628		err = stm32_hash_hmac_dma_send(hdev);
 629	}
 630
 631	return err;
 632}
 633
 634static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
 635{
 636	struct stm32_hash_dev *hdev = NULL, *tmp;
 637
 638	spin_lock_bh(&stm32_hash.lock);
 639	if (!ctx->hdev) {
 640		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
 641			hdev = tmp;
 642			break;
 643		}
 644		ctx->hdev = hdev;
 645	} else {
 646		hdev = ctx->hdev;
 647	}
 648
 649	spin_unlock_bh(&stm32_hash.lock);
 650
 651	return hdev;
 652}
 653
 654static bool stm32_hash_dma_aligned_data(struct ahash_request *req)
 655{
 656	struct scatterlist *sg;
 657	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
 658	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
 659	int i;
 660
 661	if (req->nbytes <= HASH_DMA_THRESHOLD)
 662		return false;
 663
 664	if (sg_nents(req->src) > 1) {
 665		if (hdev->dma_mode == 1)
 666			return false;
 667		for_each_sg(req->src, sg, sg_nents(req->src), i) {
 668			if ((!IS_ALIGNED(sg->length, sizeof(u32))) &&
 669			    (!sg_is_last(sg)))
 670				return false;
 671		}
 672	}
 673
 674	if (req->src->offset % 4)
 675		return false;
 676
 677	return true;
 678}
 679
 680static int stm32_hash_init(struct ahash_request *req)
 681{
 682	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 683	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 684	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 685	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
 686
 687	rctx->hdev = hdev;
 688
 689	rctx->flags = HASH_FLAGS_CPU;
 690
 691	rctx->digcnt = crypto_ahash_digestsize(tfm);
 692	switch (rctx->digcnt) {
 693	case MD5_DIGEST_SIZE:
 694		rctx->flags |= HASH_FLAGS_MD5;
 695		break;
 696	case SHA1_DIGEST_SIZE:
 697		rctx->flags |= HASH_FLAGS_SHA1;
 698		break;
 699	case SHA224_DIGEST_SIZE:
 700		rctx->flags |= HASH_FLAGS_SHA224;
 701		break;
 702	case SHA256_DIGEST_SIZE:
 703		rctx->flags |= HASH_FLAGS_SHA256;
 704		break;
 705	default:
 706		return -EINVAL;
 707	}
 708
 709	rctx->bufcnt = 0;
 710	rctx->buflen = HASH_BUFLEN;
 711	rctx->total = 0;
 712	rctx->offset = 0;
 713	rctx->data_type = HASH_DATA_8_BITS;
 714
 715	memset(rctx->buffer, 0, HASH_BUFLEN);
 716
 717	if (ctx->flags & HASH_FLAGS_HMAC)
 718		rctx->flags |= HASH_FLAGS_HMAC;
 719
 720	dev_dbg(hdev->dev, "%s Flags %lx\n", __func__, rctx->flags);
 721
 722	return 0;
 723}
 724
 725static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
 726{
 727	return stm32_hash_update_cpu(hdev);
 728}
 729
 730static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
 731{
 732	struct ahash_request *req = hdev->req;
 733	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 734	int err;
 735	int buflen = rctx->bufcnt;
 736
 737	rctx->bufcnt = 0;
 738
 739	if (!(rctx->flags & HASH_FLAGS_CPU))
 740		err = stm32_hash_dma_send(hdev);
 741	else
 742		err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
 743
 744
 745	return err;
 746}
 747
 748static void stm32_hash_copy_hash(struct ahash_request *req)
 749{
 750	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 751	u32 *hash = (u32 *)rctx->digest;
 752	unsigned int i, hashsize;
 753
 754	switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
 755	case HASH_FLAGS_MD5:
 756		hashsize = MD5_DIGEST_SIZE;
 757		break;
 758	case HASH_FLAGS_SHA1:
 759		hashsize = SHA1_DIGEST_SIZE;
 760		break;
 761	case HASH_FLAGS_SHA224:
 762		hashsize = SHA224_DIGEST_SIZE;
 763		break;
 764	case HASH_FLAGS_SHA256:
 765		hashsize = SHA256_DIGEST_SIZE;
 766		break;
 767	default:
 768		return;
 769	}
 770
 771	for (i = 0; i < hashsize / sizeof(u32); i++)
 772		hash[i] = be32_to_cpu(stm32_hash_read(rctx->hdev,
 773						      HASH_HREG(i)));
 774}
 775
 776static int stm32_hash_finish(struct ahash_request *req)
 777{
 778	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 779
 780	if (!req->result)
 781		return -EINVAL;
 782
 783	memcpy(req->result, rctx->digest, rctx->digcnt);
 784
 785	return 0;
 786}
 787
 788static void stm32_hash_finish_req(struct ahash_request *req, int err)
 789{
 790	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 791	struct stm32_hash_dev *hdev = rctx->hdev;
 792
 793	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
 794		stm32_hash_copy_hash(req);
 795		err = stm32_hash_finish(req);
 796		hdev->flags &= ~(HASH_FLAGS_FINAL | HASH_FLAGS_CPU |
 797				 HASH_FLAGS_INIT | HASH_FLAGS_DMA_READY |
 798				 HASH_FLAGS_OUTPUT_READY | HASH_FLAGS_HMAC |
 799				 HASH_FLAGS_HMAC_INIT | HASH_FLAGS_HMAC_FINAL |
 800				 HASH_FLAGS_HMAC_KEY);
 801	} else {
 802		rctx->flags |= HASH_FLAGS_ERRORS;
 803	}
 804
 805	pm_runtime_mark_last_busy(hdev->dev);
 806	pm_runtime_put_autosuspend(hdev->dev);
 807
 808	crypto_finalize_hash_request(hdev->engine, req, err);
 809}
 810
 811static int stm32_hash_hw_init(struct stm32_hash_dev *hdev,
 812			      struct stm32_hash_request_ctx *rctx)
 813{
 814	pm_runtime_get_sync(hdev->dev);
 815
 816	if (!(HASH_FLAGS_INIT & hdev->flags)) {
 817		stm32_hash_write(hdev, HASH_CR, HASH_CR_INIT);
 818		stm32_hash_write(hdev, HASH_STR, 0);
 819		stm32_hash_write(hdev, HASH_DIN, 0);
 820		stm32_hash_write(hdev, HASH_IMR, 0);
 821		hdev->err = 0;
 822	}
 823
 824	return 0;
 825}
 826
 827static int stm32_hash_one_request(struct crypto_engine *engine, void *areq);
 828static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq);
 829
 830static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
 831				   struct ahash_request *req)
 832{
 833	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
 834}
 835
 836static int stm32_hash_prepare_req(struct crypto_engine *engine, void *areq)
 837{
 838	struct ahash_request *req = container_of(areq, struct ahash_request,
 839						 base);
 840	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
 841	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
 842	struct stm32_hash_request_ctx *rctx;
 843
 844	if (!hdev)
 845		return -ENODEV;
 846
 847	hdev->req = req;
 848
 849	rctx = ahash_request_ctx(req);
 850
 851	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
 852		rctx->op, req->nbytes);
 853
 854	return stm32_hash_hw_init(hdev, rctx);
 855}
 856
 857static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
 858{
 859	struct ahash_request *req = container_of(areq, struct ahash_request,
 860						 base);
 861	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
 862	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
 863	struct stm32_hash_request_ctx *rctx;
 864	int err = 0;
 865
 866	if (!hdev)
 867		return -ENODEV;
 868
 869	hdev->req = req;
 870
 871	rctx = ahash_request_ctx(req);
 872
 873	if (rctx->op == HASH_OP_UPDATE)
 874		err = stm32_hash_update_req(hdev);
 875	else if (rctx->op == HASH_OP_FINAL)
 876		err = stm32_hash_final_req(hdev);
 877
  878	if (err != -EINPROGRESS)
  879		/* done task will not finish it, so do it here */
  880		stm32_hash_finish_req(req, err);
 881
 882	return 0;
 883}
 884
 885static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
 886{
 887	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 888	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
 889	struct stm32_hash_dev *hdev = ctx->hdev;
 890
 891	rctx->op = op;
 892
 893	return stm32_hash_handle_queue(hdev, req);
 894}
 895
 896static int stm32_hash_update(struct ahash_request *req)
 897{
 898	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 899
 900	if (!req->nbytes || !(rctx->flags & HASH_FLAGS_CPU))
 901		return 0;
 902
 903	rctx->total = req->nbytes;
 904	rctx->sg = req->src;
 905	rctx->offset = 0;
 906
 907	if ((rctx->bufcnt + rctx->total < rctx->buflen)) {
 908		stm32_hash_append_sg(rctx);
 909		return 0;
 910	}
 911
 912	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
 913}
 914
 915static int stm32_hash_final(struct ahash_request *req)
 916{
 917	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 918
 919	rctx->flags |= HASH_FLAGS_FINUP;
 920
 921	return stm32_hash_enqueue(req, HASH_OP_FINAL);
 922}
 923
 924static int stm32_hash_finup(struct ahash_request *req)
 925{
 926	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 927	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
 928	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
 929	int err1, err2;
 930
 931	rctx->flags |= HASH_FLAGS_FINUP;
 932
 933	if (hdev->dma_lch && stm32_hash_dma_aligned_data(req))
 934		rctx->flags &= ~HASH_FLAGS_CPU;
 935
 936	err1 = stm32_hash_update(req);
 937
 938	if (err1 == -EINPROGRESS || err1 == -EBUSY)
 939		return err1;
 940
  941	/*
  942	 * final() must always be called to clean up resources,
  943	 * even if update() failed, except when it returned -EINPROGRESS
  944	 */
 945	err2 = stm32_hash_final(req);
 946
 947	return err1 ?: err2;
 948}
 949
 950static int stm32_hash_digest(struct ahash_request *req)
 951{
 952	return stm32_hash_init(req) ?: stm32_hash_finup(req);
 953}
 954
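/*
 * stm32_hash_export()/stm32_hash_import() - save and restore a partial hash
 * state: IMR, STR and CR plus the HASH_CSR_REGISTER_NUMBER context swap
 * registers, carried between the two calls in rctx->hw_context.
 */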
 955static int stm32_hash_export(struct ahash_request *req, void *out)
 956{
 957	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 958	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
 959	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
 960	u32 *preg;
 961	unsigned int i;
 962
 963	pm_runtime_get_sync(hdev->dev);
 964
 965	while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
 966		cpu_relax();
 967
 968	rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
 969					 sizeof(u32),
 970					 GFP_KERNEL);
 971
 972	preg = rctx->hw_context;
 973
 974	*preg++ = stm32_hash_read(hdev, HASH_IMR);
 975	*preg++ = stm32_hash_read(hdev, HASH_STR);
 976	*preg++ = stm32_hash_read(hdev, HASH_CR);
 977	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
 978		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
 979
 980	pm_runtime_mark_last_busy(hdev->dev);
 981	pm_runtime_put_autosuspend(hdev->dev);
 982
 983	memcpy(out, rctx, sizeof(*rctx));
 984
 985	return 0;
 986}
 987
 988static int stm32_hash_import(struct ahash_request *req, const void *in)
 989{
 990	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 991	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
 992	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
 993	const u32 *preg = in;
 994	u32 reg;
 995	unsigned int i;
 996
 997	memcpy(rctx, in, sizeof(*rctx));
 998
 999	preg = rctx->hw_context;
1000
1001	pm_runtime_get_sync(hdev->dev);
1002
1003	stm32_hash_write(hdev, HASH_IMR, *preg++);
1004	stm32_hash_write(hdev, HASH_STR, *preg++);
1005	stm32_hash_write(hdev, HASH_CR, *preg);
1006	reg = *preg++ | HASH_CR_INIT;
1007	stm32_hash_write(hdev, HASH_CR, reg);
1008
1009	for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
1010		stm32_hash_write(hdev, HASH_CSR(i), *preg++);
1011
1012	pm_runtime_mark_last_busy(hdev->dev);
1013	pm_runtime_put_autosuspend(hdev->dev);
1014
1015	kfree(rctx->hw_context);
1016
1017	return 0;
1018}
1019
1020static int stm32_hash_setkey(struct crypto_ahash *tfm,
1021			     const u8 *key, unsigned int keylen)
1022{
1023	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1024
1025	if (keylen <= HASH_MAX_KEY_SIZE) {
1026		memcpy(ctx->key, key, keylen);
1027		ctx->keylen = keylen;
1028	} else {
1029		return -ENOMEM;
1030	}
1031
1032	return 0;
1033}
1034
1035static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
1036				    const char *algs_hmac_name)
1037{
1038	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1039
1040	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1041				 sizeof(struct stm32_hash_request_ctx));
1042
1043	ctx->keylen = 0;
1044
1045	if (algs_hmac_name)
1046		ctx->flags |= HASH_FLAGS_HMAC;
1047
1048	ctx->enginectx.op.do_one_request = stm32_hash_one_request;
1049	ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
1050	ctx->enginectx.op.unprepare_request = NULL;
1051	return 0;
1052}
1053
1054static int stm32_hash_cra_init(struct crypto_tfm *tfm)
1055{
1056	return stm32_hash_cra_init_algs(tfm, NULL);
1057}
1058
1059static int stm32_hash_cra_md5_init(struct crypto_tfm *tfm)
1060{
1061	return stm32_hash_cra_init_algs(tfm, "md5");
1062}
1063
1064static int stm32_hash_cra_sha1_init(struct crypto_tfm *tfm)
1065{
1066	return stm32_hash_cra_init_algs(tfm, "sha1");
1067}
1068
1069static int stm32_hash_cra_sha224_init(struct crypto_tfm *tfm)
1070{
1071	return stm32_hash_cra_init_algs(tfm, "sha224");
1072}
1073
1074static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
1075{
1076	return stm32_hash_cra_init_algs(tfm, "sha256");
1077}
1078
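/*
 * Interrupt handling is split in two: the hard handler below acknowledges
 * the "digest ready" status and masks further interrupts, then wakes this
 * thread, which finalizes the request outside of hard-IRQ context.
 */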
1079static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
1080{
1081	struct stm32_hash_dev *hdev = dev_id;
1082
1083	if (HASH_FLAGS_CPU & hdev->flags) {
1084		if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1085			hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1086			goto finish;
1087		}
1088	} else if (HASH_FLAGS_DMA_READY & hdev->flags) {
1089		if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) {
1090			hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE;
 1091			goto finish;
1092		}
1093	}
1094
1095	return IRQ_HANDLED;
1096
1097finish:
1098	/* Finish current request */
1099	stm32_hash_finish_req(hdev->req, 0);
1100
1101	return IRQ_HANDLED;
1102}
1103
1104static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
1105{
1106	struct stm32_hash_dev *hdev = dev_id;
1107	u32 reg;
1108
1109	reg = stm32_hash_read(hdev, HASH_SR);
1110	if (reg & HASH_SR_OUTPUT_READY) {
1111		reg &= ~HASH_SR_OUTPUT_READY;
1112		stm32_hash_write(hdev, HASH_SR, reg);
1113		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
 1114		/* Disable interrupts */
1115		stm32_hash_write(hdev, HASH_IMR, 0);
1116		return IRQ_WAKE_THREAD;
1117	}
1118
1119	return IRQ_NONE;
1120}
1121
1122static struct ahash_alg algs_md5_sha1[] = {
1123	{
1124		.init = stm32_hash_init,
1125		.update = stm32_hash_update,
1126		.final = stm32_hash_final,
1127		.finup = stm32_hash_finup,
1128		.digest = stm32_hash_digest,
1129		.export = stm32_hash_export,
1130		.import = stm32_hash_import,
1131		.halg = {
1132			.digestsize = MD5_DIGEST_SIZE,
1133			.statesize = sizeof(struct stm32_hash_request_ctx),
1134			.base = {
1135				.cra_name = "md5",
1136				.cra_driver_name = "stm32-md5",
1137				.cra_priority = 200,
1138				.cra_flags = CRYPTO_ALG_ASYNC |
1139					CRYPTO_ALG_KERN_DRIVER_ONLY,
1140				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1141				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1142				.cra_alignmask = 3,
1143				.cra_init = stm32_hash_cra_init,
1144				.cra_module = THIS_MODULE,
1145			}
1146		}
1147	},
1148	{
1149		.init = stm32_hash_init,
1150		.update = stm32_hash_update,
1151		.final = stm32_hash_final,
1152		.finup = stm32_hash_finup,
1153		.digest = stm32_hash_digest,
1154		.export = stm32_hash_export,
1155		.import = stm32_hash_import,
1156		.setkey = stm32_hash_setkey,
1157		.halg = {
1158			.digestsize = MD5_DIGEST_SIZE,
1159			.statesize = sizeof(struct stm32_hash_request_ctx),
1160			.base = {
1161				.cra_name = "hmac(md5)",
1162				.cra_driver_name = "stm32-hmac-md5",
1163				.cra_priority = 200,
1164				.cra_flags = CRYPTO_ALG_ASYNC |
1165					CRYPTO_ALG_KERN_DRIVER_ONLY,
1166				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1167				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1168				.cra_alignmask = 3,
1169				.cra_init = stm32_hash_cra_md5_init,
1170				.cra_module = THIS_MODULE,
1171			}
1172		}
1173	},
1174	{
1175		.init = stm32_hash_init,
1176		.update = stm32_hash_update,
1177		.final = stm32_hash_final,
1178		.finup = stm32_hash_finup,
1179		.digest = stm32_hash_digest,
1180		.export = stm32_hash_export,
1181		.import = stm32_hash_import,
1182		.halg = {
1183			.digestsize = SHA1_DIGEST_SIZE,
1184			.statesize = sizeof(struct stm32_hash_request_ctx),
1185			.base = {
1186				.cra_name = "sha1",
1187				.cra_driver_name = "stm32-sha1",
1188				.cra_priority = 200,
1189				.cra_flags = CRYPTO_ALG_ASYNC |
1190					CRYPTO_ALG_KERN_DRIVER_ONLY,
1191				.cra_blocksize = SHA1_BLOCK_SIZE,
1192				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1193				.cra_alignmask = 3,
1194				.cra_init = stm32_hash_cra_init,
1195				.cra_module = THIS_MODULE,
1196			}
1197		}
1198	},
1199	{
1200		.init = stm32_hash_init,
1201		.update = stm32_hash_update,
1202		.final = stm32_hash_final,
1203		.finup = stm32_hash_finup,
1204		.digest = stm32_hash_digest,
1205		.export = stm32_hash_export,
1206		.import = stm32_hash_import,
1207		.setkey = stm32_hash_setkey,
1208		.halg = {
1209			.digestsize = SHA1_DIGEST_SIZE,
1210			.statesize = sizeof(struct stm32_hash_request_ctx),
1211			.base = {
1212				.cra_name = "hmac(sha1)",
1213				.cra_driver_name = "stm32-hmac-sha1",
1214				.cra_priority = 200,
1215				.cra_flags = CRYPTO_ALG_ASYNC |
1216					CRYPTO_ALG_KERN_DRIVER_ONLY,
1217				.cra_blocksize = SHA1_BLOCK_SIZE,
1218				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1219				.cra_alignmask = 3,
1220				.cra_init = stm32_hash_cra_sha1_init,
1221				.cra_module = THIS_MODULE,
1222			}
1223		}
1224	},
1225};
1226
1227static struct ahash_alg algs_sha224_sha256[] = {
1228	{
1229		.init = stm32_hash_init,
1230		.update = stm32_hash_update,
1231		.final = stm32_hash_final,
1232		.finup = stm32_hash_finup,
1233		.digest = stm32_hash_digest,
1234		.export = stm32_hash_export,
1235		.import = stm32_hash_import,
1236		.halg = {
1237			.digestsize = SHA224_DIGEST_SIZE,
1238			.statesize = sizeof(struct stm32_hash_request_ctx),
1239			.base = {
1240				.cra_name = "sha224",
1241				.cra_driver_name = "stm32-sha224",
1242				.cra_priority = 200,
1243				.cra_flags = CRYPTO_ALG_ASYNC |
1244					CRYPTO_ALG_KERN_DRIVER_ONLY,
1245				.cra_blocksize = SHA224_BLOCK_SIZE,
1246				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1247				.cra_alignmask = 3,
1248				.cra_init = stm32_hash_cra_init,
1249				.cra_module = THIS_MODULE,
1250			}
1251		}
1252	},
1253	{
1254		.init = stm32_hash_init,
1255		.update = stm32_hash_update,
1256		.final = stm32_hash_final,
1257		.finup = stm32_hash_finup,
1258		.digest = stm32_hash_digest,
1259		.setkey = stm32_hash_setkey,
1260		.export = stm32_hash_export,
1261		.import = stm32_hash_import,
1262		.halg = {
1263			.digestsize = SHA224_DIGEST_SIZE,
1264			.statesize = sizeof(struct stm32_hash_request_ctx),
1265			.base = {
1266				.cra_name = "hmac(sha224)",
1267				.cra_driver_name = "stm32-hmac-sha224",
1268				.cra_priority = 200,
1269				.cra_flags = CRYPTO_ALG_ASYNC |
1270					CRYPTO_ALG_KERN_DRIVER_ONLY,
1271				.cra_blocksize = SHA224_BLOCK_SIZE,
1272				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1273				.cra_alignmask = 3,
1274				.cra_init = stm32_hash_cra_sha224_init,
1275				.cra_module = THIS_MODULE,
1276			}
1277		}
1278	},
1279	{
1280		.init = stm32_hash_init,
1281		.update = stm32_hash_update,
1282		.final = stm32_hash_final,
1283		.finup = stm32_hash_finup,
1284		.digest = stm32_hash_digest,
1285		.export = stm32_hash_export,
1286		.import = stm32_hash_import,
1287		.halg = {
1288			.digestsize = SHA256_DIGEST_SIZE,
1289			.statesize = sizeof(struct stm32_hash_request_ctx),
1290			.base = {
1291				.cra_name = "sha256",
1292				.cra_driver_name = "stm32-sha256",
1293				.cra_priority = 200,
1294				.cra_flags = CRYPTO_ALG_ASYNC |
1295					CRYPTO_ALG_KERN_DRIVER_ONLY,
1296				.cra_blocksize = SHA256_BLOCK_SIZE,
1297				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1298				.cra_alignmask = 3,
1299				.cra_init = stm32_hash_cra_init,
1300				.cra_module = THIS_MODULE,
1301			}
1302		}
1303	},
1304	{
1305		.init = stm32_hash_init,
1306		.update = stm32_hash_update,
1307		.final = stm32_hash_final,
1308		.finup = stm32_hash_finup,
1309		.digest = stm32_hash_digest,
1310		.export = stm32_hash_export,
1311		.import = stm32_hash_import,
1312		.setkey = stm32_hash_setkey,
1313		.halg = {
1314			.digestsize = SHA256_DIGEST_SIZE,
1315			.statesize = sizeof(struct stm32_hash_request_ctx),
1316			.base = {
1317				.cra_name = "hmac(sha256)",
1318				.cra_driver_name = "stm32-hmac-sha256",
1319				.cra_priority = 200,
1320				.cra_flags = CRYPTO_ALG_ASYNC |
1321					CRYPTO_ALG_KERN_DRIVER_ONLY,
1322				.cra_blocksize = SHA256_BLOCK_SIZE,
1323				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1324				.cra_alignmask = 3,
1325				.cra_init = stm32_hash_cra_sha256_init,
1326				.cra_module = THIS_MODULE,
1327			}
1328		}
1329	},
1330};
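
The arrays above are what stm32_hash_register_algs() hands to the crypto core; nothing calls this driver directly. As a minimal sketch of the consumer side (not part of this file; example_sha256() and its shape are illustrative only), a kernel user reaches these transforms through the generic ahash API, and crypto_wait_req() absorbs the -EINPROGRESS that this async driver returns:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* "sha256" resolves to "stm32-sha256" when its priority (200) wins */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	/* data must be sg-addressable (e.g. kmalloc'd), not on the stack */
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* the driver returns -EINPROGRESS; crypto_wait_req() blocks on it */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}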
1331
1332static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
1333{
1334	unsigned int i, j;
1335	int err;
1336
1337	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1338		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
1339			err = crypto_register_ahash(
1340				&hdev->pdata->algs_info[i].algs_list[j]);
1341			if (err)
1342				goto err_algs;
1343		}
1344	}
1345
1346	return 0;
1347err_algs:
1348	dev_err(hdev->dev, "Algo %d : %d failed\n", i, j);
1349	for (; i--; ) {
1350		for (; j--;)
1351			crypto_unregister_ahash(
1352				&hdev->pdata->algs_info[i].algs_list[j]);
1353	}
1354
1355	return err;
1356}
1357
1358static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
1359{
1360	unsigned int i, j;
1361
1362	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
1363		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
1364			crypto_unregister_ahash(
1365				&hdev->pdata->algs_info[i].algs_list[j]);
1366	}
1367
1368	return 0;
1369}
1370
1371static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
1372	{
1373		.algs_list	= algs_md5_sha1,
1374		.size		= ARRAY_SIZE(algs_md5_sha1),
1375	},
1376};
1377
1378static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
1379	.algs_info	= stm32_hash_algs_info_stm32f4,
1380	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
1381};
1382
1383static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
1384	{
1385		.algs_list	= algs_md5_sha1,
1386		.size		= ARRAY_SIZE(algs_md5_sha1),
1387	},
1388	{
1389		.algs_list	= algs_sha224_sha256,
1390		.size		= ARRAY_SIZE(algs_sha224_sha256),
1391	},
1392};
1393
1394static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
1395	.algs_info	= stm32_hash_algs_info_stm32f7,
1396	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
1397};
1398
1399static const struct of_device_id stm32_hash_of_match[] = {
1400	{
1401		.compatible = "st,stm32f456-hash",
1402		.data = &stm32_hash_pdata_stm32f4,
1403	},
1404	{
1405		.compatible = "st,stm32f756-hash",
1406		.data = &stm32_hash_pdata_stm32f7,
1407	},
1408	{},
1409};
1410
1411MODULE_DEVICE_TABLE(of, stm32_hash_of_match);
1412
1413static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
1414				   struct device *dev)
1415{
1416	hdev->pdata = of_device_get_match_data(dev);
1417	if (!hdev->pdata) {
1418		dev_err(dev, "no compatible OF match\n");
1419		return -EINVAL;
1420	}
1421
1422	if (of_property_read_u32(dev->of_node, "dma-maxburst",
1423				 &hdev->dma_maxburst)) {
1424		dev_info(dev, "dma-maxburst not specified, using 0\n");
1425		hdev->dma_maxburst = 0;
1426	}
1427
1428	return 0;
1429}
1430
1431static int stm32_hash_probe(struct platform_device *pdev)
1432{
1433	struct stm32_hash_dev *hdev;
1434	struct device *dev = &pdev->dev;
1435	struct resource *res;
1436	int ret, irq;
1437
1438	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
1439	if (!hdev)
1440		return -ENOMEM;
1441
1442	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1443	hdev->io_base = devm_ioremap_resource(dev, res);
1444	if (IS_ERR(hdev->io_base))
1445		return PTR_ERR(hdev->io_base);
1446
1447	hdev->phys_base = res->start;
1448
1449	ret = stm32_hash_get_of_match(hdev, dev);
1450	if (ret)
1451		return ret;
1452
1453	irq = platform_get_irq(pdev, 0);
1454	if (irq < 0)
1455		return irq;
1456
1457	ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
1458					stm32_hash_irq_thread, IRQF_ONESHOT,
1459					dev_name(dev), hdev);
1460	if (ret) {
1461		dev_err(dev, "Cannot grab IRQ\n");
1462		return ret;
1463	}
1464
1465	hdev->clk = devm_clk_get(&pdev->dev, NULL);
1466	if (IS_ERR(hdev->clk)) {
1467		if (PTR_ERR(hdev->clk) != -EPROBE_DEFER) {
1468			dev_err(dev, "failed to get clock for hash (%lu)\n",
1469				PTR_ERR(hdev->clk));
1470		}
1471
1472		return PTR_ERR(hdev->clk);
1473	}
1474
1475	ret = clk_prepare_enable(hdev->clk);
1476	if (ret) {
1477		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
1478		return ret;
1479	}
1480
1481	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
1482	pm_runtime_use_autosuspend(dev);
1483
1484	pm_runtime_get_noresume(dev);
1485	pm_runtime_set_active(dev);
1486	pm_runtime_enable(dev);
1487
1488	hdev->rst = devm_reset_control_get(&pdev->dev, NULL);
1489	if (IS_ERR(hdev->rst)) {
1490		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
1491			ret = -EPROBE_DEFER;
1492			goto err_reset;
1493		}
1494	} else {
1495		reset_control_assert(hdev->rst);
1496		udelay(2);
1497		reset_control_deassert(hdev->rst);
1498	}
1499
1500	hdev->dev = dev;
1501
1502	platform_set_drvdata(pdev, hdev);
1503
1504	ret = stm32_hash_dma_init(hdev);
1505	switch (ret) {
1506	case 0:
1507		break;
1508	case -ENOENT:
1509		dev_dbg(dev, "DMA mode not available\n");
1510		break;
1511	default:
1512		goto err_dma;
1513	}
1514
1515	spin_lock(&stm32_hash.lock);
1516	list_add_tail(&hdev->list, &stm32_hash.dev_list);
1517	spin_unlock(&stm32_hash.lock);
1518
1519	/* Initialize crypto engine */
1520	hdev->engine = crypto_engine_alloc_init(dev, 1);
1521	if (!hdev->engine) {
1522		ret = -ENOMEM;
1523		goto err_engine;
1524	}
1525
1526	ret = crypto_engine_start(hdev->engine);
1527	if (ret)
1528		goto err_engine_start;
1529
1530	hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
1531
1532	/* Register algos */
1533	ret = stm32_hash_register_algs(hdev);
1534	if (ret)
1535		goto err_algs;
1536
1537	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
1538		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);
1539
1540	pm_runtime_put_sync(dev);
1541
1542	return 0;
1543
1544err_algs:
1545err_engine_start:
1546	crypto_engine_exit(hdev->engine);
1547err_engine:
1548	spin_lock(&stm32_hash.lock);
1549	list_del(&hdev->list);
1550	spin_unlock(&stm32_hash.lock);
1551err_dma:
1552	if (hdev->dma_lch)
1553		dma_release_channel(hdev->dma_lch);
1554err_reset:
1555	pm_runtime_disable(dev);
1556	pm_runtime_put_noidle(dev);
1557
1558	clk_disable_unprepare(hdev->clk);
1559
1560	return ret;
1561}
1562
1563static int stm32_hash_remove(struct platform_device *pdev)
1564{
1565	struct stm32_hash_dev *hdev;
1566	int ret;
1567
1568	hdev = platform_get_drvdata(pdev);
1569	if (!hdev)
1570		return -ENODEV;
1571
1572	ret = pm_runtime_get_sync(hdev->dev);
1573	if (ret < 0)
1574		return ret;
1575
1576	stm32_hash_unregister_algs(hdev);
1577
1578	crypto_engine_exit(hdev->engine);
1579
1580	spin_lock(&stm32_hash.lock);
1581	list_del(&hdev->list);
1582	spin_unlock(&stm32_hash.lock);
1583
1584	if (hdev->dma_lch)
1585		dma_release_channel(hdev->dma_lch);
1586
1587	pm_runtime_disable(hdev->dev);
1588	pm_runtime_put_noidle(hdev->dev);
1589
1590	clk_disable_unprepare(hdev->clk);
1591
1592	return 0;
1593}
1594
1595#ifdef CONFIG_PM
1596static int stm32_hash_runtime_suspend(struct device *dev)
1597{
1598	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1599
1600	clk_disable_unprepare(hdev->clk);
1601
1602	return 0;
1603}
1604
1605static int stm32_hash_runtime_resume(struct device *dev)
1606{
1607	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
1608	int ret;
1609
1610	ret = clk_prepare_enable(hdev->clk);
1611	if (ret) {
1612		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
1613		return ret;
1614	}
1615
1616	return 0;
1617}
1618#endif
1619
1620static const struct dev_pm_ops stm32_hash_pm_ops = {
1621	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1622				pm_runtime_force_resume)
1623	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
1624			   stm32_hash_runtime_resume, NULL)
1625};
1626
1627static struct platform_driver stm32_hash_driver = {
1628	.probe		= stm32_hash_probe,
1629	.remove		= stm32_hash_remove,
1630	.driver		= {
1631		.name	= "stm32-hash",
1632		.pm = &stm32_hash_pm_ops,
1633		.of_match_table	= stm32_hash_of_match,
1634	}
1635};
1636
1637module_platform_driver(stm32_hash_driver);
1638
1639MODULE_DESCRIPTION("STM32 SHA1/224/256 & MD5 (HMAC) hw accelerator driver");
1640MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
1641MODULE_LICENSE("GPL v2");
v6.13.7 (drivers/crypto/stm32/stm32-hash.c)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * This file is part of STM32 Crypto driver for Linux.
   4 *
   5 * Copyright (C) 2017, STMicroelectronics - All Rights Reserved
   6 * Author(s): Lionel DEBIEVE <lionel.debieve@st.com> for STMicroelectronics.
   7 */
   8
   9#include <crypto/engine.h>
  10#include <crypto/internal/hash.h>
  11#include <crypto/md5.h>
  12#include <crypto/scatterwalk.h>
  13#include <crypto/sha1.h>
  14#include <crypto/sha2.h>
  15#include <crypto/sha3.h>
  16#include <linux/clk.h>
  17#include <linux/delay.h>
  18#include <linux/dma-mapping.h>
  19#include <linux/dmaengine.h>
  20#include <linux/interrupt.h>
  21#include <linux/iopoll.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/of.h>
  25#include <linux/platform_device.h>
  26#include <linux/pm_runtime.h>
  27#include <linux/reset.h>
  28#include <linux/string.h>
  29
  30#define HASH_CR				0x00
  31#define HASH_DIN			0x04
  32#define HASH_STR			0x08
  33#define HASH_UX500_HREG(x)		(0x0c + ((x) * 0x04))
  34#define HASH_IMR			0x20
  35#define HASH_SR				0x24
  36#define HASH_CSR(x)			(0x0F8 + ((x) * 0x04))
  37#define HASH_HREG(x)			(0x310 + ((x) * 0x04))
  38#define HASH_HWCFGR			0x3F0
  39#define HASH_VER			0x3F4
  40#define HASH_ID				0x3F8
  41
  42/* Control Register */
  43#define HASH_CR_INIT			BIT(2)
  44#define HASH_CR_DMAE			BIT(3)
  45#define HASH_CR_DATATYPE_POS		4
  46#define HASH_CR_MODE			BIT(6)
  47#define HASH_CR_ALGO_POS		7
  48#define HASH_CR_MDMAT			BIT(13)
  49#define HASH_CR_DMAA			BIT(14)
  50#define HASH_CR_LKEY			BIT(16)
  51
  52/* Interrupt */
  53#define HASH_DINIE			BIT(0)
  54#define HASH_DCIE			BIT(1)
  55
  56/* Interrupt Mask */
  57#define HASH_MASK_CALC_COMPLETION	BIT(0)
  58#define HASH_MASK_DATA_INPUT		BIT(1)
  59
  60/* Status Flags */
  61#define HASH_SR_DATA_INPUT_READY	BIT(0)
  62#define HASH_SR_OUTPUT_READY		BIT(1)
  63#define HASH_SR_DMA_ACTIVE		BIT(2)
  64#define HASH_SR_BUSY			BIT(3)
  65
  66/* STR Register */
  67#define HASH_STR_NBLW_MASK		GENMASK(4, 0)
  68#define HASH_STR_DCAL			BIT(8)
  69
  70/* HWCFGR Register */
  71#define HASH_HWCFG_DMA_MASK		GENMASK(3, 0)
  72
  73/* Context swap register */
  74#define HASH_CSR_NB_SHA256_HMAC		54
  75#define HASH_CSR_NB_SHA256		38
  76#define HASH_CSR_NB_SHA512_HMAC		103
  77#define HASH_CSR_NB_SHA512		91
  78#define HASH_CSR_NB_SHA3_HMAC		88
  79#define HASH_CSR_NB_SHA3		72
  80#define HASH_CSR_NB_MAX			HASH_CSR_NB_SHA512_HMAC
  81
  82#define HASH_FLAGS_INIT			BIT(0)
  83#define HASH_FLAGS_OUTPUT_READY		BIT(1)
  84#define HASH_FLAGS_CPU			BIT(2)
  85#define HASH_FLAGS_DMA_ACTIVE		BIT(3)
  86#define HASH_FLAGS_HMAC_INIT		BIT(4)
  87#define HASH_FLAGS_HMAC_FINAL		BIT(5)
  88#define HASH_FLAGS_HMAC_KEY		BIT(6)
  89#define HASH_FLAGS_SHA3_MODE		BIT(7)
  90#define HASH_FLAGS_FINAL		BIT(15)
  91#define HASH_FLAGS_FINUP		BIT(16)
  92#define HASH_FLAGS_ALGO_MASK		GENMASK(20, 17)
  93#define HASH_FLAGS_ALGO_SHIFT		17
  94#define HASH_FLAGS_ERRORS		BIT(21)
  95#define HASH_FLAGS_EMPTY		BIT(22)
  96#define HASH_FLAGS_HMAC			BIT(23)
  97#define HASH_FLAGS_SGS_COPIED		BIT(24)
  98
  99#define HASH_OP_UPDATE			1
 100#define HASH_OP_FINAL			2
 101
 102#define HASH_BURST_LEVEL		4
 103
 104enum stm32_hash_data_format {
 105	HASH_DATA_32_BITS		= 0x0,
 106	HASH_DATA_16_BITS		= 0x1,
 107	HASH_DATA_8_BITS		= 0x2,
 108	HASH_DATA_1_BIT			= 0x3
 109};
 110
 111#define HASH_BUFLEN			(SHA3_224_BLOCK_SIZE + 4)
 112#define HASH_MAX_KEY_SIZE		(SHA512_BLOCK_SIZE * 8)
 113
 114enum stm32_hash_algo {
 115	HASH_SHA1			= 0,
 116	HASH_MD5			= 1,
 117	HASH_SHA224			= 2,
 118	HASH_SHA256			= 3,
 119	HASH_SHA3_224			= 4,
 120	HASH_SHA3_256			= 5,
 121	HASH_SHA3_384			= 6,
 122	HASH_SHA3_512			= 7,
 123	HASH_SHA384			= 12,
 124	HASH_SHA512			= 15,
 125};
 126
 127enum ux500_hash_algo {
 128	HASH_SHA256_UX500		= 0,
 129	HASH_SHA1_UX500			= 1,
 130};
 131
 132#define HASH_AUTOSUSPEND_DELAY		50
 133
 134struct stm32_hash_ctx {
 135	struct stm32_hash_dev	*hdev;
 136	struct crypto_shash	*xtfm;
 137	unsigned long		flags;
 138
 139	u8			key[HASH_MAX_KEY_SIZE];
 140	int			keylen;
 141};
 142
 143struct stm32_hash_state {
 144	u32			flags;
 145
 146	u16			bufcnt;
 147	u16			blocklen;
 148
 149	u8 buffer[HASH_BUFLEN] __aligned(sizeof(u32));
 150
 151	/* hash state */
 152	u32			hw_context[3 + HASH_CSR_NB_MAX];
 153};
 154
 155struct stm32_hash_request_ctx {
 156	struct stm32_hash_dev	*hdev;
 157	unsigned long		op;
 158
 159	u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
 160	size_t			digcnt;
 161
 162	struct scatterlist	*sg;
 163	struct scatterlist	sgl[2]; /* scatterlist used to realize alignment */
 164	unsigned int		offset;
 165	unsigned int		total;
 166	struct scatterlist	sg_key;
 167
 168	dma_addr_t		dma_addr;
 169	size_t			dma_ct;
 170	int			nents;
 171
 172	u8			data_type;
 173
 174	struct stm32_hash_state state;
 175};
 176
 177struct stm32_hash_algs_info {
 178	struct ahash_engine_alg	*algs_list;
 179	size_t			size;
 180};
 181
 182struct stm32_hash_pdata {
 183	const int				alg_shift;
 184	const struct stm32_hash_algs_info	*algs_info;
 185	size_t					algs_info_size;
 186	bool					has_sr;
 187	bool					has_mdmat;
 188	bool					context_secured;
 189	bool					broken_emptymsg;
 190	bool					ux500;
 191};
 192
 193struct stm32_hash_dev {
 194	struct list_head	list;
 195	struct device		*dev;
 196	struct clk		*clk;
 197	struct reset_control	*rst;
 198	void __iomem		*io_base;
 199	phys_addr_t		phys_base;
 200	u8			xmit_buf[HASH_BUFLEN] __aligned(sizeof(u32));
 201	u32			dma_mode;
 202	bool			polled;
 203
 204	struct ahash_request	*req;
 205	struct crypto_engine	*engine;
 206
 207	unsigned long		flags;
 208
 209	struct dma_chan		*dma_lch;
 210	struct completion	dma_completion;
 211
 212	const struct stm32_hash_pdata	*pdata;
 213};
 214
 215struct stm32_hash_drv {
 216	struct list_head	dev_list;
 217	spinlock_t		lock; /* List protection access */
 218};
 219
 220static struct stm32_hash_drv stm32_hash = {
 221	.dev_list = LIST_HEAD_INIT(stm32_hash.dev_list),
 222	.lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock),
 223};
 224
 225static void stm32_hash_dma_callback(void *param);
 226static int stm32_hash_prepare_request(struct ahash_request *req);
 227static void stm32_hash_unprepare_request(struct ahash_request *req);
 228
 229static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset)
 230{
 231	return readl_relaxed(hdev->io_base + offset);
 232}
 233
 234static inline void stm32_hash_write(struct stm32_hash_dev *hdev,
 235				    u32 offset, u32 value)
 236{
 237	writel_relaxed(value, hdev->io_base + offset);
 238}
 239
 240/**
  241 * stm32_hash_wait_busy - wait until the hash processor is available. It returns
  242 * an error if the hash core is processing a block of data for more than 10 ms.
 243 * @hdev: the stm32_hash_dev device.
 244 */
 245static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
 246{
 247	u32 status;
 248
 249	/* The Ux500 lacks the special status register, we poll the DCAL bit instead */
 250	if (!hdev->pdata->has_sr)
 251		return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
 252						  !(status & HASH_STR_DCAL), 10, 10000);
 253
 254	return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
 255				   !(status & HASH_SR_BUSY), 10, 10000);
 256}
 257
 258/**
 259 * stm32_hash_set_nblw - set the number of valid bytes in the last word.
 260 * @hdev: the stm32_hash_dev device.
 261 * @length: the length of the final word.
 262 */
 263static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length)
 264{
 265	u32 reg;
 266
 267	reg = stm32_hash_read(hdev, HASH_STR);
 268	reg &= ~(HASH_STR_NBLW_MASK);
 269	reg |= (8U * ((length) % 4U));
 270	stm32_hash_write(hdev, HASH_STR, reg);
 271}
 272
 273static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
 274{
 275	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
 276	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 277	u32 reg;
 278	int keylen = ctx->keylen;
 279	void *key = ctx->key;
 280
 281	if (keylen) {
 282		stm32_hash_set_nblw(hdev, keylen);
 283
 284		while (keylen > 0) {
 285			stm32_hash_write(hdev, HASH_DIN, *(u32 *)key);
 286			keylen -= 4;
 287			key += 4;
 288		}
 289
 290		reg = stm32_hash_read(hdev, HASH_STR);
 291		reg |= HASH_STR_DCAL;
 292		stm32_hash_write(hdev, HASH_STR, reg);
 293
 294		return -EINPROGRESS;
 295	}
 296
 297	return 0;
 298}
 299
 300/**
 301 * stm32_hash_write_ctrl - Initialize the hash processor, only if
 302 * HASH_FLAGS_INIT is set.
 303 * @hdev: the stm32_hash_dev device
 304 */
 305static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
 306{
 307	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 308	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
 309	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 310	struct stm32_hash_state *state = &rctx->state;
 311	u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT;
 312
 313	u32 reg = HASH_CR_INIT;
 314
 315	if (!(hdev->flags & HASH_FLAGS_INIT)) {
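		/*
		 * Encode the algorithm selector into CR: the Ux500 IP has a
		 * single algo bit at HASH_CR_ALGO_POS; older STM32 IP (where
		 * alg_shift == HASH_CR_ALGO_POS) spreads the two bits across
		 * CR[7] and CR[18]; newer IP has a contiguous field at
		 * alg_shift.
		 */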
 316		if (hdev->pdata->ux500) {
 317			reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS);
 318		} else {
 319			if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS)
 320				reg |= ((alg & BIT(1)) << 17) |
 321				       ((alg & BIT(0)) << HASH_CR_ALGO_POS);
 322			else
 323				reg |= alg << hdev->pdata->alg_shift;
 324		}
 325
 326		reg |= (rctx->data_type << HASH_CR_DATATYPE_POS);
 327
 328		if (state->flags & HASH_FLAGS_HMAC) {
 329			hdev->flags |= HASH_FLAGS_HMAC;
 330			reg |= HASH_CR_MODE;
 331			if (ctx->keylen > crypto_ahash_blocksize(tfm))
 332				reg |= HASH_CR_LKEY;
 333		}
 334
 335		if (!hdev->polled)
 336			stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
 337
 338		stm32_hash_write(hdev, HASH_CR, reg);
 339
 340		hdev->flags |= HASH_FLAGS_INIT;
 341
 342		/*
 343		 * Once the first block + 1 word has been filled up, only one
 344		 * more block is needed to start a partial computation.
 345		 */
 346		rctx->state.blocklen -= sizeof(u32);
 347
 348		dev_dbg(hdev->dev, "Write Control %x\n", reg);
 349	}
 350}
 351
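/*
 * Drain the request scatterlist into the request buffer: copy bytes from
 * rctx->sg (starting at rctx->offset) into state->buffer until either a
 * full block (state->blocklen) has been accumulated or rctx->total runs
 * out, advancing to the next scatterlist entry as needed.
 */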
 352static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx)
 353{
 354	struct stm32_hash_state *state = &rctx->state;
 355	size_t count;
 356
 357	while ((state->bufcnt < state->blocklen) && rctx->total) {
 358		count = min(rctx->sg->length - rctx->offset, rctx->total);
 359		count = min_t(size_t, count, state->blocklen - state->bufcnt);
 360
 361		if (count <= 0) {
 362			if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) {
 363				rctx->sg = sg_next(rctx->sg);
 364				continue;
 365			} else {
 366				break;
 367			}
 368		}
 369
 370		scatterwalk_map_and_copy(state->buffer + state->bufcnt,
 371					 rctx->sg, rctx->offset, count, 0);
 372
 373		state->bufcnt += count;
 374		rctx->offset += count;
 375		rctx->total -= count;
 376
 377		if (rctx->offset == rctx->sg->length) {
 378			rctx->sg = sg_next(rctx->sg);
 379			if (rctx->sg)
 380				rctx->offset = 0;
 381			else
 382				rctx->total = 0;
 383		}
 384	}
 385}
 386
 387static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
 388			       const u8 *buf, size_t length, int final)
 389{
 390	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 391	struct stm32_hash_state *state = &rctx->state;
 392	unsigned int count, len32;
 393	const u32 *buffer = (const u32 *)buf;
 394	u32 reg;
 395
 396	if (final) {
 397		hdev->flags |= HASH_FLAGS_FINAL;
 398
 399		/* Do not process empty messages if hw is buggy. */
 400		if (!(hdev->flags & HASH_FLAGS_INIT) && !length &&
 401		    hdev->pdata->broken_emptymsg) {
 402			state->flags |= HASH_FLAGS_EMPTY;
 403			return 0;
 404		}
 405	}
 406
 407	len32 = DIV_ROUND_UP(length, sizeof(u32));
 408
 409	dev_dbg(hdev->dev, "%s: length: %zu, final: %x, len32: %i\n",
 410		__func__, length, final, len32);
 411
 412	hdev->flags |= HASH_FLAGS_CPU;
 413
 414	stm32_hash_write_ctrl(hdev);
 415
 416	if (stm32_hash_wait_busy(hdev))
 417		return -ETIMEDOUT;
 418
 419	if ((hdev->flags & HASH_FLAGS_HMAC) &&
 420	    (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
 421		hdev->flags |= HASH_FLAGS_HMAC_KEY;
 422		stm32_hash_write_key(hdev);
 423		if (stm32_hash_wait_busy(hdev))
 424			return -ETIMEDOUT;
 425	}
 426
 427	for (count = 0; count < len32; count++)
 428		stm32_hash_write(hdev, HASH_DIN, buffer[count]);
 429
 430	if (final) {
 431		if (stm32_hash_wait_busy(hdev))
 432			return -ETIMEDOUT;
 433
 434		stm32_hash_set_nblw(hdev, length);
 435		reg = stm32_hash_read(hdev, HASH_STR);
 436		reg |= HASH_STR_DCAL;
 437		stm32_hash_write(hdev, HASH_STR, reg);
 438		if (hdev->flags & HASH_FLAGS_HMAC) {
 439			if (stm32_hash_wait_busy(hdev))
 440				return -ETIMEDOUT;
 441			stm32_hash_write_key(hdev);
 442		}
 443		return -EINPROGRESS;
 444	}
 445
 446	return 0;
 447}
 448
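/*
 * Return how many HASH_CSR context-swap registers must be saved and
 * restored for the algorithm selected in the request state. The HMAC modes
 * need a larger register set than their plain-hash counterparts.
 */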
 449static int hash_swap_reg(struct stm32_hash_request_ctx *rctx)
 450{
 451	struct stm32_hash_state *state = &rctx->state;
 452
 453	switch ((state->flags & HASH_FLAGS_ALGO_MASK) >>
 454		HASH_FLAGS_ALGO_SHIFT) {
 455	case HASH_MD5:
 456	case HASH_SHA1:
 457	case HASH_SHA224:
 458	case HASH_SHA256:
 459		if (state->flags & HASH_FLAGS_HMAC)
 460			return HASH_CSR_NB_SHA256_HMAC;
 461		else
 462			return HASH_CSR_NB_SHA256;
 463		break;
 464
 465	case HASH_SHA384:
 466	case HASH_SHA512:
 467		if (state->flags & HASH_FLAGS_HMAC)
 468			return HASH_CSR_NB_SHA512_HMAC;
 469		else
 470			return HASH_CSR_NB_SHA512;
 471		break;
 472
 473	case HASH_SHA3_224:
 474	case HASH_SHA3_256:
 475	case HASH_SHA3_384:
 476	case HASH_SHA3_512:
 477		if (state->flags & HASH_FLAGS_HMAC)
 478			return HASH_CSR_NB_SHA3_HMAC;
 479		else
 480			return HASH_CSR_NB_SHA3;
 481		break;
 482
 483	default:
 484		return -EINVAL;
 485	}
 486}
 487
 488static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
 489{
 490	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 491	struct stm32_hash_state *state = &rctx->state;
 492	int bufcnt, err = 0, final;
 493
 494	dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags);
 495
 496	final = state->flags & HASH_FLAGS_FINAL;
 497
 498	while ((rctx->total >= state->blocklen) ||
 499	       (state->bufcnt + rctx->total >= state->blocklen)) {
 500		stm32_hash_append_sg(rctx);
 501		bufcnt = state->bufcnt;
 502		state->bufcnt = 0;
 503		err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0);
 504		if (err)
 505			return err;
 506	}
 507
 508	stm32_hash_append_sg(rctx);
 509
 510	if (final) {
 511		bufcnt = state->bufcnt;
 512		state->bufcnt = 0;
 513		return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1);
 514	}
 515
 516	return err;
 517}
 518
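/*
 * Submit a single scatterlist entry to the DMA engine: set MDMAT when more
 * transfers will follow (on IPs that support it), enable DMAE, then wait up
 * to 100 ms for the transfer to complete. Returns -EINPROGRESS on success,
 * since the hash computation itself may still be running afterwards.
 */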
 519static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
 520			       struct scatterlist *sg, int length, int mdmat)
 521{
 522	struct dma_async_tx_descriptor *in_desc;
 523	dma_cookie_t cookie;
 524	u32 reg;
 525	int err;
 526
 527	dev_dbg(hdev->dev, "%s mdmat: %x length: %d\n", __func__, mdmat, length);
 528
 529	/* Do not use DMA if there is no data to send. */
 530	if (length <= 0)
 531		return 0;
 532
 533	in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1,
 534					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT |
 535					  DMA_CTRL_ACK);
 536	if (!in_desc) {
 537		dev_err(hdev->dev, "dmaengine_prep_slave error\n");
 538		return -ENOMEM;
 539	}
 540
 541	reinit_completion(&hdev->dma_completion);
 542	in_desc->callback = stm32_hash_dma_callback;
 543	in_desc->callback_param = hdev;
 544
 545	hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
 546
 547	reg = stm32_hash_read(hdev, HASH_CR);
 548
 549	if (hdev->pdata->has_mdmat) {
 550		if (mdmat)
 551			reg |= HASH_CR_MDMAT;
 552		else
 553			reg &= ~HASH_CR_MDMAT;
 554	}
 555	reg |= HASH_CR_DMAE;
 556
 557	stm32_hash_write(hdev, HASH_CR, reg);
 558
 559
 560	cookie = dmaengine_submit(in_desc);
 561	err = dma_submit_error(cookie);
 562	if (err)
 563		return -ENOMEM;
 564
 565	dma_async_issue_pending(hdev->dma_lch);
 566
 567	if (!wait_for_completion_timeout(&hdev->dma_completion,
 568					 msecs_to_jiffies(100)))
 569		err = -ETIMEDOUT;
 570
 571	if (dma_async_is_tx_complete(hdev->dma_lch, cookie,
 572				     NULL, NULL) != DMA_COMPLETE)
 573		err = -ETIMEDOUT;
 574
 575	if (err) {
 576		dev_err(hdev->dev, "DMA Error %i\n", err);
 577		dmaengine_terminate_all(hdev->dma_lch);
 578		return err;
 579	}
 580
 581	return -EINPROGRESS;
 582}
 583
 584static void stm32_hash_dma_callback(void *param)
 585{
 586	struct stm32_hash_dev *hdev = param;
 587
 588	complete(&hdev->dma_completion);
 589}
 590
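/*
 * Load the HMAC key ahead of the message: keys shorter than a block, and
 * the multiple-DMA configurations, are written by the CPU through
 * stm32_hash_write_key(); otherwise the key is mapped and pushed through a
 * dedicated DMA transfer.
 */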
 591static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev)
 592{
 593	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 594	struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
 595	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 596	int err;
 597
 598	if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode > 0) {
 599		err = stm32_hash_write_key(hdev);
 600		if (stm32_hash_wait_busy(hdev))
 601			return -ETIMEDOUT;
 602	} else {
 603		if (!(hdev->flags & HASH_FLAGS_HMAC_KEY))
 604			sg_init_one(&rctx->sg_key, ctx->key,
 605				    ALIGN(ctx->keylen, sizeof(u32)));
 606
 607		rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1,
 608					  DMA_TO_DEVICE);
 609		if (rctx->dma_ct == 0) {
 610			dev_err(hdev->dev, "dma_map_sg error\n");
 611			return -ENOMEM;
 612		}
 613
 614		err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0);
 615
 616		dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE);
 617	}
 618
 619	return err;
 620}
 621
 622static int stm32_hash_dma_init(struct stm32_hash_dev *hdev)
 623{
 624	struct dma_slave_config dma_conf;
 625	struct dma_chan *chan;
 626	int err;
 627
 628	memset(&dma_conf, 0, sizeof(dma_conf));
 629
 630	dma_conf.direction = DMA_MEM_TO_DEV;
 631	dma_conf.dst_addr = hdev->phys_base + HASH_DIN;
 632	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 633	dma_conf.src_maxburst = HASH_BURST_LEVEL;
 634	dma_conf.dst_maxburst = HASH_BURST_LEVEL;
 635	dma_conf.device_fc = false;
 636
 637	chan = dma_request_chan(hdev->dev, "in");
 638	if (IS_ERR(chan))
 639		return PTR_ERR(chan);
 640
 641	hdev->dma_lch = chan;
 642
 643	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
 644	if (err) {
 645		dma_release_channel(hdev->dma_lch);
 646		hdev->dma_lch = NULL;
 647		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
 648		return err;
 649	}
 650
 651	init_completion(&hdev->dma_completion);
 652
 653	return 0;
 654}
 655
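/*
 * Walk the prepared scatterlist and DMA it to the IP block by block. For a
 * non-final transfer, the very last 32-bit word is held back and written to
 * HASH_DIN by the CPU; for a final transfer in multiple-DMA mode, DMA is
 * aborted (DMAA) and the residue is pushed manually before setting DCAL.
 */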
 656static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
 657{
 658	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 659	u32 *buffer = (void *)rctx->state.buffer;
 660	struct scatterlist sg[1], *tsg;
 661	int err = 0, reg, ncp = 0;
 662	unsigned int i, len = 0, bufcnt = 0;
 663	bool final = hdev->flags & HASH_FLAGS_FINAL;
 664	bool is_last = false;
 665	u32 last_word;
 666
 667	dev_dbg(hdev->dev, "%s total: %d bufcnt: %d final: %d\n",
 668		__func__, rctx->total, rctx->state.bufcnt, final);
 669
 670	if (rctx->nents < 0)
 671		return -EINVAL;
 672
 673	stm32_hash_write_ctrl(hdev);
 674
 675	if (hdev->flags & HASH_FLAGS_HMAC && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) {
 676		hdev->flags |= HASH_FLAGS_HMAC_KEY;
 677		err = stm32_hash_hmac_dma_send(hdev);
 678		if (err != -EINPROGRESS)
 679			return err;
 680	}
 681
 682	for_each_sg(rctx->sg, tsg, rctx->nents, i) {
 683		sg[0] = *tsg;
 684		len = sg->length;
 685
 686		if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) {
 687			if (!final) {
 688				/* Always manually put the last word of a non-final transfer. */
 689				len -= sizeof(u32);
 690				sg_pcopy_to_buffer(rctx->sg, rctx->nents, &last_word, 4, len);
 691				sg->length -= sizeof(u32);
 692			} else {
 693				/*
 694				 * In Multiple DMA mode, DMA must be aborted before the final
 695				 * transfer.
 696				 */
 697				sg->length = rctx->total - bufcnt;
 698				if (hdev->dma_mode > 0) {
 699					len = (ALIGN(sg->length, 16) - 16);
 700
 701					ncp = sg_pcopy_to_buffer(rctx->sg, rctx->nents,
 702								 rctx->state.buffer,
 703								 sg->length - len,
 704								 rctx->total - sg->length + len);
 705
 706					if (!len)
 707						break;
 708
 709					sg->length = len;
 710				} else {
 711					is_last = true;
 712					if (!(IS_ALIGNED(sg->length, sizeof(u32)))) {
 713						len = sg->length;
 714						sg->length = ALIGN(sg->length,
 715								   sizeof(u32));
 716					}
 717				}
 718			}
 719		}
 720
 721		rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1,
 722					  DMA_TO_DEVICE);
 723		if (rctx->dma_ct == 0) {
 724			dev_err(hdev->dev, "dma_map_sg error\n");
 725			return -ENOMEM;
 726		}
 727
 728		err = stm32_hash_xmit_dma(hdev, sg, len, !is_last);
 729
 730		/* The last word of a non-final transfer is sent manually. */
 731		if (!final) {
 732			stm32_hash_write(hdev, HASH_DIN, last_word);
 733			len += sizeof(u32);
 734		}
 735
 736		rctx->total -= len;
 737
 738		bufcnt += sg[0].length;
 739		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 740
 741		if (err == -ENOMEM || err == -ETIMEDOUT)
 742			return err;
 743		if (is_last)
 744			break;
 745	}
 746
 747	/*
 748	 * When the second last block transfer of 4 words is performed by the DMA,
 749	 * the software must set the DMA Abort bit (DMAA) to 1 before completing the
 750	 * last transfer of 4 words or less.
 751	 */
 752	if (final) {
 753		if (hdev->dma_mode > 0) {
 754			if (stm32_hash_wait_busy(hdev))
 755				return -ETIMEDOUT;
 756			reg = stm32_hash_read(hdev, HASH_CR);
 757			reg &= ~HASH_CR_DMAE;
 758			reg |= HASH_CR_DMAA;
 759			stm32_hash_write(hdev, HASH_CR, reg);
 760
 761			if (ncp) {
 762				memset(buffer + ncp, 0, 4 - DIV_ROUND_UP(ncp, sizeof(u32)));
 763				writesl(hdev->io_base + HASH_DIN, buffer,
 764					DIV_ROUND_UP(ncp, sizeof(u32)));
 765			}
 766
 767			stm32_hash_set_nblw(hdev, ncp);
 768			reg = stm32_hash_read(hdev, HASH_STR);
 769			reg |= HASH_STR_DCAL;
 770			stm32_hash_write(hdev, HASH_STR, reg);
 771			err = -EINPROGRESS;
 772		}
 773
 774		/*
 775		 * The hash processor needs the key to be loaded a second time in order
 776		 * to process the HMAC.
 777		 */
 778		if (hdev->flags & HASH_FLAGS_HMAC) {
 779			if (stm32_hash_wait_busy(hdev))
 780				return -ETIMEDOUT;
 781			err = stm32_hash_hmac_dma_send(hdev);
 782		}
 783
 784		return err;
 785	}
 786
 787	if (err != -EINPROGRESS)
 788		return err;
 789
 790	return 0;
 791}
 792
 793static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx)
 794{
 795	struct stm32_hash_dev *hdev = NULL, *tmp;
 796
 797	spin_lock_bh(&stm32_hash.lock);
 798	if (!ctx->hdev) {
 799		list_for_each_entry(tmp, &stm32_hash.dev_list, list) {
 800			hdev = tmp;
 801			break;
 802		}
 803		ctx->hdev = hdev;
 804	} else {
 805		hdev = ctx->hdev;
 806	}
 807
 808	spin_unlock_bh(&stm32_hash.lock);
 809
 810	return hdev;
 811}
 812
 813static int stm32_hash_init(struct ahash_request *req)
 814{
 815	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 816	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 817	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 818	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
 819	struct stm32_hash_state *state = &rctx->state;
 820	bool sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE;
 821
 822	rctx->hdev = hdev;
 823	state->flags = 0;
 824
 825	if (!(hdev->dma_lch && hdev->pdata->has_mdmat))
 826		state->flags |= HASH_FLAGS_CPU;
 827
 828	if (sha3_mode)
 829		state->flags |= HASH_FLAGS_SHA3_MODE;
 830
 831	rctx->digcnt = crypto_ahash_digestsize(tfm);
 832	switch (rctx->digcnt) {
 833	case MD5_DIGEST_SIZE:
 834		state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT;
 835		break;
 836	case SHA1_DIGEST_SIZE:
 837		if (hdev->pdata->ux500)
 838			state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT;
 839		else
 840			state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT;
 841		break;
 842	case SHA224_DIGEST_SIZE:
 843		if (sha3_mode)
 844			state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT;
 845		else
 846			state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT;
 847		break;
 848	case SHA256_DIGEST_SIZE:
 849		if (sha3_mode) {
 850			state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT;
 851		} else {
 852			if (hdev->pdata->ux500)
 853				state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT;
 854			else
 855				state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT;
 856		}
 857		break;
 858	case SHA384_DIGEST_SIZE:
 859		if (sha3_mode)
 860			state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT;
 861		else
 862			state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT;
 863		break;
 864	case SHA512_DIGEST_SIZE:
 865		if (sha3_mode)
 866			state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT;
 867		else
 868			state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT;
 869		break;
 870	default:
 871		return -EINVAL;
 872	}
 873
 874	rctx->state.bufcnt = 0;
 875	rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32);
 876	if (rctx->state.blocklen > HASH_BUFLEN) {
 877		dev_err(hdev->dev, "Error, block too large\n");
 878		return -EINVAL;
 879	}
 880	rctx->nents = 0;
 881	rctx->total = 0;
 882	rctx->offset = 0;
 883	rctx->data_type = HASH_DATA_8_BITS;
 884
 885	if (ctx->flags & HASH_FLAGS_HMAC)
 886		state->flags |= HASH_FLAGS_HMAC;
 887
 888	dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags);
 889
 890	return 0;
 891}
 892
 893static int stm32_hash_update_req(struct stm32_hash_dev *hdev)
 894{
 895	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
 896	struct stm32_hash_state *state = &rctx->state;
 897
 898	dev_dbg(hdev->dev, "update_req: total: %u, digcnt: %zd, final: 0\n",
 899		rctx->total, rctx->digcnt);
 900
 901	if (!(state->flags & HASH_FLAGS_CPU))
 902		return stm32_hash_dma_send(hdev);
 903
 904	return stm32_hash_update_cpu(hdev);
 905}
 906
 907static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
 908{
 909	struct ahash_request *req = hdev->req;
 910	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 911	struct stm32_hash_state *state = &rctx->state;
 912	int buflen = state->bufcnt;
 913
 914	if (!(state->flags & HASH_FLAGS_CPU)) {
 915		hdev->flags |= HASH_FLAGS_FINAL;
 916		return stm32_hash_dma_send(hdev);
 917	}
 918
 919	if (state->flags & HASH_FLAGS_FINUP)
 920		return stm32_hash_update_req(hdev);
 921
 922	state->bufcnt = 0;
 923
 924	return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1);
 925}
 926
 927static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
 928{
 929	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
 930	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 931	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 932	struct stm32_hash_dev *hdev = rctx->hdev;
 933	int ret;
 934
 935	dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
 936		ctx->keylen);
 937
 938	if (!ctx->xtfm) {
 939		dev_err(hdev->dev, "no fallback engine\n");
 940		return;
 941	}
 942
 943	if (ctx->keylen) {
 944		ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
 945		if (ret) {
 946			dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
 947			return;
 948		}
 949	}
 950
 951	ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
 952	if (ret)
 953		dev_err(hdev->dev, "shash digest error\n");
 954}
 955
 956static void stm32_hash_copy_hash(struct ahash_request *req)
 957{
 958	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 959	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 960	struct stm32_hash_state *state = &rctx->state;
 961	struct stm32_hash_dev *hdev = rctx->hdev;
 962	__be32 *hash = (void *)rctx->digest;
 963	unsigned int i, hashsize;
 964
 965	if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY))
 966		return stm32_hash_emptymsg_fallback(req);
 967
 968	hashsize = crypto_ahash_digestsize(tfm);
 969
 970	for (i = 0; i < hashsize / sizeof(u32); i++) {
 971		if (hdev->pdata->ux500)
 972			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
 973					      HASH_UX500_HREG(i)));
 974		else
 975			hash[i] = cpu_to_be32(stm32_hash_read(hdev,
 976					      HASH_HREG(i)));
 977	}
 978}
 979
 980static int stm32_hash_finish(struct ahash_request *req)
 981{
 982	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
 983	u32 reg;
 984
 985	reg = stm32_hash_read(rctx->hdev, HASH_SR);
 986	reg &= ~HASH_SR_OUTPUT_READY;
 987	stm32_hash_write(rctx->hdev, HASH_SR, reg);
 988
 989	if (!req->result)
 990		return -EINVAL;
 991
 992	memcpy(req->result, rctx->digest, rctx->digcnt);
 993
 994	return 0;
 995}
 996
 997static void stm32_hash_finish_req(struct ahash_request *req, int err)
 998{
 999	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1000	struct stm32_hash_state *state = &rctx->state;
1001	struct stm32_hash_dev *hdev = rctx->hdev;
1002
1003	if (hdev->flags & HASH_FLAGS_DMA_ACTIVE)
1004		state->flags |= HASH_FLAGS_DMA_ACTIVE;
1005	else
1006		state->flags &= ~HASH_FLAGS_DMA_ACTIVE;
1007
1008	if (!err && (HASH_FLAGS_FINAL & hdev->flags)) {
1009		stm32_hash_copy_hash(req);
1010		err = stm32_hash_finish(req);
1011	}
1012
1013	/* A finalized request must be unprepared here */
1014	stm32_hash_unprepare_request(req);
1015
1016	crypto_finalize_hash_request(hdev->engine, req, err);
1017}
1018
1019static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev,
1020				   struct ahash_request *req)
1021{
1022	return crypto_transfer_hash_request_to_engine(hdev->engine, req);
1023}
1024
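/*
 * Engine callback processing one queued request. If the request was already
 * started earlier (HASH_FLAGS_INIT in the saved state), the IMR, STR, CR and
 * CSR registers are restored from state->hw_context before resuming. In
 * polled mode, completion is awaited here rather than in the IRQ handler.
 */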
1025static int stm32_hash_one_request(struct crypto_engine *engine, void *areq)
1026{
1027	struct ahash_request *req = container_of(areq, struct ahash_request,
1028						 base);
1029	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1030	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1031	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1032	struct stm32_hash_state *state = &rctx->state;
1033	int swap_reg;
1034	int err = 0;
1035
1036	if (!hdev)
1037		return -ENODEV;
1038
1039	dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n",
1040		rctx->op, req->nbytes);
1041
1042	pm_runtime_get_sync(hdev->dev);
1043
1044	err = stm32_hash_prepare_request(req);
1045	if (err)
1046		return err;
1047
1048	hdev->req = req;
1049	hdev->flags = 0;
1050	swap_reg = hash_swap_reg(rctx);
1051
1052	if (state->flags & HASH_FLAGS_INIT) {
1053		u32 *preg = rctx->state.hw_context;
1054		u32 reg;
1055		int i;
1056
1057		if (!hdev->pdata->ux500)
1058			stm32_hash_write(hdev, HASH_IMR, *preg++);
1059		stm32_hash_write(hdev, HASH_STR, *preg++);
1060		stm32_hash_write(hdev, HASH_CR, *preg);
1061		reg = *preg++ | HASH_CR_INIT;
1062		stm32_hash_write(hdev, HASH_CR, reg);
1063
1064		for (i = 0; i < swap_reg; i++)
1065			stm32_hash_write(hdev, HASH_CSR(i), *preg++);
1066
1067		hdev->flags |= HASH_FLAGS_INIT;
1068
1069		if (state->flags & HASH_FLAGS_HMAC)
1070			hdev->flags |= HASH_FLAGS_HMAC |
1071				       HASH_FLAGS_HMAC_KEY;
1072
1073		if (state->flags & HASH_FLAGS_CPU)
1074			hdev->flags |= HASH_FLAGS_CPU;
1075
1076		if (state->flags & HASH_FLAGS_DMA_ACTIVE)
1077			hdev->flags |= HASH_FLAGS_DMA_ACTIVE;
1078	}
1079
1080	if (rctx->op == HASH_OP_UPDATE)
1081		err = stm32_hash_update_req(hdev);
1082	else if (rctx->op == HASH_OP_FINAL)
1083		err = stm32_hash_final_req(hdev);
1084
1085	/* If we have an IRQ, wait for that, else poll for completion */
1086	if (err == -EINPROGRESS && hdev->polled) {
1087		if (stm32_hash_wait_busy(hdev))
1088			err = -ETIMEDOUT;
1089		else {
1090			hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1091			err = 0;
1092		}
1093	}
1094
1095	/* The done task will not finish it, so do it here */
1096	if (err != -EINPROGRESS)
1097		stm32_hash_finish_req(req, err);
1098
1099	return 0;
1100}
1101
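/*
 * Fallback for misaligned input: allocate contiguous pages, copy any bytes
 * already buffered in xmit_buf plus the scatterlist data into them, and
 * rebuild rctx->sg as a single well-aligned entry that the DMA can handle.
 */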
1102static int stm32_hash_copy_sgs(struct stm32_hash_request_ctx *rctx,
1103			       struct scatterlist *sg, int bs,
1104			       unsigned int new_len)
1105{
1106	struct stm32_hash_state *state = &rctx->state;
1107	int pages;
1108	void *buf;
1109
1110	pages = get_order(new_len);
1111
1112	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
1113	if (!buf) {
1114		pr_err("Couldn't allocate pages for unaligned cases.\n");
1115		return -ENOMEM;
1116	}
1117
1118	if (state->bufcnt)
1119		memcpy(buf, rctx->hdev->xmit_buf, state->bufcnt);
1120
1121	scatterwalk_map_and_copy(buf + state->bufcnt, sg, rctx->offset,
1122				 min(new_len, rctx->total) - state->bufcnt, 0);
1123	sg_init_table(rctx->sgl, 1);
1124	sg_set_buf(rctx->sgl, buf, new_len);
1125	rctx->sg = rctx->sgl;
1126	state->flags |= HASH_FLAGS_SGS_COPIED;
1127	rctx->nents = 1;
1128	rctx->offset += new_len - state->bufcnt;
1129	state->bufcnt = 0;
1130	rctx->total = new_len;
1131
1132	return 0;
1133}
1134
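/*
 * Check that the input scatterlist is usable by the DMA as-is: every entry
 * must be 32-bit aligned and cover a whole number of blocks (except the
 * final one). If not, linearize it with stm32_hash_copy_sgs(). Bytes that
 * are already buffered get chained in front of the list via xmit_buf.
 */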
1135static int stm32_hash_align_sgs(struct scatterlist *sg,
1136				int nbytes, int bs, bool init, bool final,
1137				struct stm32_hash_request_ctx *rctx)
1138{
1139	struct stm32_hash_state *state = &rctx->state;
1140	struct stm32_hash_dev *hdev = rctx->hdev;
1141	struct scatterlist *sg_tmp = sg;
1142	int offset = rctx->offset;
1143	int new_len;
1144	int n = 0;
1145	int bufcnt = state->bufcnt;
1146	bool secure_ctx = hdev->pdata->context_secured;
1147	bool aligned = true;
1148
1149	if (!sg || !sg->length || !nbytes) {
1150		if (bufcnt) {
1151			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
1152			sg_init_table(rctx->sgl, 1);
1153			sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, bufcnt);
1154			rctx->sg = rctx->sgl;
1155			rctx->nents = 1;
1156		}
1157
1158		return 0;
1159	}
1160
1161	new_len = nbytes;
1162
1163	if (offset)
1164		aligned = false;
1165
1166	if (final) {
1167		new_len = DIV_ROUND_UP(new_len, bs) * bs;
1168	} else {
1169		new_len = (new_len - 1) / bs * bs; /* keep n blocks - 1 block */
1170
1171		/*
1172		 * On some versions of the HASH IP, the context can only be saved
1173		 * when the FIFO is ready to accept a new block. This implies sending
1174		 * n blocks plus a 32-bit word in the first DMA transfer.
1175		 */
1176		if (init && secure_ctx) {
1177			new_len += sizeof(u32);
1178			if (unlikely(new_len > nbytes))
1179				new_len -= bs;
1180		}
1181	}
1182
1183	if (!new_len)
1184		return 0;
1185
1186	if (nbytes != new_len)
1187		aligned = false;
1188
1189	while (nbytes > 0 && sg_tmp) {
1190		n++;
1191
1192		if (bufcnt) {
1193			if (!IS_ALIGNED(bufcnt, bs)) {
1194				aligned = false;
1195				break;
1196			}
1197			nbytes -= bufcnt;
1198			bufcnt = 0;
1199			if (!nbytes)
1200				aligned = false;
1201
1202			continue;
1203		}
1204
1205		if (offset < sg_tmp->length) {
1206			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
1207				aligned = false;
1208				break;
1209			}
1210
1211			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
1212				aligned = false;
1213				break;
1214			}
1215		}
1216
1217		if (offset) {
1218			offset -= sg_tmp->length;
1219			if (offset < 0) {
1220				nbytes += offset;
1221				offset = 0;
1222			}
1223		} else {
1224			nbytes -= sg_tmp->length;
1225		}
1226
1227		sg_tmp = sg_next(sg_tmp);
1228
1229		if (nbytes < 0) {
1230			aligned = false;
1231			break;
1232		}
1233	}
1234
1235	if (!aligned)
1236		return stm32_hash_copy_sgs(rctx, sg, bs, new_len);
1237
1238	rctx->total = new_len;
1239	rctx->offset += new_len;
1240	rctx->nents = n;
1241	if (state->bufcnt) {
1242		sg_init_table(rctx->sgl, 2);
1243		sg_set_buf(rctx->sgl, rctx->hdev->xmit_buf, state->bufcnt);
1244		sg_chain(rctx->sgl, 2, sg);
1245		rctx->sg = rctx->sgl;
1246	} else {
1247		rctx->sg = sg;
1248	}
1249
1250	return 0;
1251}
1252
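/*
 * DMA-only request preparation: merge previously buffered bytes with the
 * new request data, align the resulting scatterlist, and stash any tail
 * smaller than a block back into state->buffer so it can be sent with a
 * later update or with the final transfer.
 */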
1253static int stm32_hash_prepare_request(struct ahash_request *req)
1254{
1255	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1256	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1257	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1258	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1259	struct stm32_hash_state *state = &rctx->state;
1260	unsigned int nbytes;
1261	int ret, hash_later, bs;
1262	bool update = rctx->op & HASH_OP_UPDATE;
1263	bool init = !(state->flags & HASH_FLAGS_INIT);
1264	bool finup = state->flags & HASH_FLAGS_FINUP;
1265	bool final = state->flags & HASH_FLAGS_FINAL;
1266
1267	if (!hdev->dma_lch || state->flags & HASH_FLAGS_CPU)
1268		return 0;
1269
1270	bs = crypto_ahash_blocksize(tfm);
1271
1272	nbytes = state->bufcnt;
1273
1274	/*
1275	 * For an update request, nbytes must correspond to the content of the
1276	 * buffer plus the request bytes not yet consumed (req->nbytes minus the
1277	 * part of the request already copied into the buffer).
1278	 */
1279	if (update || finup)
1280		nbytes += req->nbytes - rctx->offset;
1281
1282	dev_dbg(hdev->dev,
1283		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%d\n",
1284		__func__, nbytes, bs, rctx->total, rctx->offset, state->bufcnt);
1285
1286	if (!nbytes)
1287		return 0;
1288
1289	rctx->total = nbytes;
1290
1291	if (update && req->nbytes && (!IS_ALIGNED(state->bufcnt, bs))) {
1292		int len = bs - state->bufcnt % bs;
1293
1294		if (len > req->nbytes)
1295			len = req->nbytes;
1296		scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
1297					 0, len, 0);
1298		state->bufcnt += len;
1299		rctx->offset = len;
1300	}
1301
1302	/* Copy the buffer into a temporary one used for sg alignment */
1303	if (state->bufcnt)
1304		memcpy(hdev->xmit_buf, state->buffer, state->bufcnt);
1305
1306	ret = stm32_hash_align_sgs(req->src, nbytes, bs, init, final, rctx);
1307	if (ret)
1308		return ret;
1309
1310	hash_later = nbytes - rctx->total;
1311	if (hash_later < 0)
1312		hash_later = 0;
1313
1314	if (hash_later && hash_later <= state->blocklen) {
1315		scatterwalk_map_and_copy(state->buffer,
1316					 req->src,
1317					 req->nbytes - hash_later,
1318					 hash_later, 0);
1319
1320		state->bufcnt = hash_later;
1321	} else {
1322		state->bufcnt = 0;
1323	}
1324
1325	if (hash_later > state->blocklen) {
1326		/* FIXME: add support of this case */
1327		pr_err("Buffer contains more than one block.\n");
1328		return -ENOMEM;
1329	}
1330
1331	rctx->total = min(nbytes, rctx->total);
1332
1333	return 0;
1334}
1335
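/*
 * Undo stm32_hash_prepare_request(): stop any DMA in flight, free pages
 * allocated for copied scatterlists, then save IMR, STR, CR and the CSR
 * context-swap registers into state->hw_context so the computation can be
 * resumed later, before releasing the runtime PM reference.
 */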
1336static void stm32_hash_unprepare_request(struct ahash_request *req)
1337{
1338	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1339	struct stm32_hash_state *state = &rctx->state;
1340	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1341	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1342	u32 *preg = state->hw_context;
1343	int swap_reg, i;
1344
1345	if (hdev->dma_lch)
1346		dmaengine_terminate_sync(hdev->dma_lch);
1347
1348	if (state->flags & HASH_FLAGS_SGS_COPIED)
1349		free_pages((unsigned long)sg_virt(rctx->sg), get_order(rctx->sg->length));
1350
1351	rctx->sg = NULL;
1352	rctx->offset = 0;
1353
1354	state->flags &= ~(HASH_FLAGS_SGS_COPIED);
1355
1356	if (!(hdev->flags & HASH_FLAGS_INIT))
1357		goto pm_runtime;
1358
1359	state->flags |= HASH_FLAGS_INIT;
1360
1361	if (stm32_hash_wait_busy(hdev)) {
1362		dev_warn(hdev->dev, "Wait busy failed\n");
1363		return;
1364	}
1365
1366	swap_reg = hash_swap_reg(rctx);
1367
1368	if (!hdev->pdata->ux500)
1369		*preg++ = stm32_hash_read(hdev, HASH_IMR);
1370	*preg++ = stm32_hash_read(hdev, HASH_STR);
1371	*preg++ = stm32_hash_read(hdev, HASH_CR);
1372	for (i = 0; i < swap_reg; i++)
1373		*preg++ = stm32_hash_read(hdev, HASH_CSR(i));
1374
1375pm_runtime:
1376	pm_runtime_mark_last_busy(hdev->dev);
1377	pm_runtime_put_autosuspend(hdev->dev);
1378}
1379
1380static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op)
1381{
1382	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1383	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
1384	struct stm32_hash_dev *hdev = ctx->hdev;
1385
1386	rctx->op = op;
1387
1388	return stm32_hash_handle_queue(hdev, req);
1389}
1390
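/*
 * Small updates are only buffered and return immediately: in CPU mode while
 * buffer + new data stay below one block, in DMA mode while they fit within
 * one block. Anything larger is queued to the crypto engine.
 */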
1391static int stm32_hash_update(struct ahash_request *req)
1392{
1393	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1394	struct stm32_hash_state *state = &rctx->state;
1395
1396	if (!req->nbytes)
1397		return 0;
1398
1399
1400	if (state->flags & HASH_FLAGS_CPU) {
1401		rctx->total = req->nbytes;
1402		rctx->sg = req->src;
1403		rctx->offset = 0;
1404
1405		if ((state->bufcnt + rctx->total < state->blocklen)) {
1406			stm32_hash_append_sg(rctx);
1407			return 0;
1408		}
1409	} else { /* DMA mode */
1410		if (state->bufcnt + req->nbytes <= state->blocklen) {
1411			scatterwalk_map_and_copy(state->buffer + state->bufcnt, req->src,
1412						 0, req->nbytes, 0);
1413			state->bufcnt += req->nbytes;
1414			return 0;
1415		}
1416	}
1417
1418	return stm32_hash_enqueue(req, HASH_OP_UPDATE);
1419}
1420
1421static int stm32_hash_final(struct ahash_request *req)
1422{
1423	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1424	struct stm32_hash_state *state = &rctx->state;
1425
1426	state->flags |= HASH_FLAGS_FINAL;
1427
1428	return stm32_hash_enqueue(req, HASH_OP_FINAL);
1429}
1430
1431static int stm32_hash_finup(struct ahash_request *req)
1432{
1433	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1434	struct stm32_hash_state *state = &rctx->state;
1435
1436	if (!req->nbytes)
1437		goto out;
1438
1439	state->flags |= HASH_FLAGS_FINUP;
1440
1441	if ((state->flags & HASH_FLAGS_CPU)) {
1442		rctx->total = req->nbytes;
1443		rctx->sg = req->src;
1444		rctx->offset = 0;
1445	}
1446
1447out:
1448	return stm32_hash_final(req);
1449}
1450
1451static int stm32_hash_digest(struct ahash_request *req)
1452{
1453	return stm32_hash_init(req) ?: stm32_hash_finup(req);
1454}
1455
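/*
 * The entire hardware and software state of a request lives in struct
 * stm32_hash_state, so export/import reduce to a memcpy(); import runs
 * stm32_hash_init() first to rebind the request to a device. A caller can
 * thus suspend a hash with crypto_ahash_export() and resume it later with
 * crypto_ahash_import().
 */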
1456static int stm32_hash_export(struct ahash_request *req, void *out)
1457{
1458	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1459
1460	memcpy(out, &rctx->state, sizeof(rctx->state));
1461
1462	return 0;
1463}
1464
1465static int stm32_hash_import(struct ahash_request *req, const void *in)
1466{
1467	struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
1468
1469	stm32_hash_init(req);
1470	memcpy(&rctx->state, in, sizeof(rctx->state));
1471
1472	return 0;
1473}
1474
1475static int stm32_hash_setkey(struct crypto_ahash *tfm,
1476			     const u8 *key, unsigned int keylen)
1477{
1478	struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1479
1480	if (keylen <= HASH_MAX_KEY_SIZE) {
1481		memcpy(ctx->key, key, keylen);
1482		ctx->keylen = keylen;
1483	} else {
1484		return -ENOMEM;
1485	}
1486
1487	return 0;
1488}
1489
1490static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
1491{
1492	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1493	struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
1494	const char *name = crypto_tfm_alg_name(tfm);
1495	struct crypto_shash *xtfm;
1496
1497	/* The fallback is only needed on Ux500 */
1498	if (!hdev->pdata->ux500)
1499		return 0;
1500
1501	xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
1502	if (IS_ERR(xtfm)) {
1503		dev_err(hdev->dev, "failed to allocate %s fallback\n",
1504			name);
1505		return PTR_ERR(xtfm);
1506	}
1507	dev_info(hdev->dev, "allocated %s fallback\n", name);
1508	ctx->xtfm = xtfm;
1509
1510	return 0;
1511}
1512
1513static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags)
1514{
1515	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1516
1517	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1518				 sizeof(struct stm32_hash_request_ctx));
1519
1520	ctx->keylen = 0;
1521
1522	if (algs_flags)
1523		ctx->flags |= algs_flags;
1524
1525	return stm32_hash_init_fallback(tfm);
1526}
1527
1528static int stm32_hash_cra_init(struct crypto_tfm *tfm)
1529{
1530	return stm32_hash_cra_init_algs(tfm, 0);
1531}
1532
1533static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm)
1534{
1535	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC);
1536}
1537
1538static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm)
1539{
1540	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE);
1541}
1542
1543static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm)
1544{
1545	return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE |
1546					HASH_FLAGS_HMAC);
1547}
1548
1549static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
1550{
1551	struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1552
1553	if (ctx->xtfm)
1554		crypto_free_shash(ctx->xtfm);
1555}
1556
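/*
 * Interrupt handling is split in two: the hard handler
 * (stm32_hash_irq_handler below) latches HASH_FLAGS_OUTPUT_READY and masks
 * further interrupts, while this threaded handler completes the request
 * outside of hard IRQ context.
 */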
1557static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
1558{
1559	struct stm32_hash_dev *hdev = dev_id;
1560
1561	if (HASH_FLAGS_OUTPUT_READY & hdev->flags) {
1562		hdev->flags &= ~HASH_FLAGS_OUTPUT_READY;
1563		goto finish;
1564	}
1565
1566	return IRQ_HANDLED;
1567
1568finish:
1569	/* Finish current request */
1570	stm32_hash_finish_req(hdev->req, 0);
1571
1572	return IRQ_HANDLED;
1573}
1574
1575static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
1576{
1577	struct stm32_hash_dev *hdev = dev_id;
1578	u32 reg;
1579
1580	reg = stm32_hash_read(hdev, HASH_SR);
1581	if (reg & HASH_SR_OUTPUT_READY) {
1582		hdev->flags |= HASH_FLAGS_OUTPUT_READY;
1583		/* Disable interrupts */
1584		stm32_hash_write(hdev, HASH_IMR, 0);
1585		return IRQ_WAKE_THREAD;
1586	}
1587
1588	return IRQ_NONE;
1589}
1590
1591static struct ahash_engine_alg algs_md5[] = {
1592	{
1593		.base.init = stm32_hash_init,
1594		.base.update = stm32_hash_update,
1595		.base.final = stm32_hash_final,
1596		.base.finup = stm32_hash_finup,
1597		.base.digest = stm32_hash_digest,
1598		.base.export = stm32_hash_export,
1599		.base.import = stm32_hash_import,
1600		.base.halg = {
1601			.digestsize = MD5_DIGEST_SIZE,
1602			.statesize = sizeof(struct stm32_hash_state),
1603			.base = {
1604				.cra_name = "md5",
1605				.cra_driver_name = "stm32-md5",
1606				.cra_priority = 200,
1607				.cra_flags = CRYPTO_ALG_ASYNC |
1608					CRYPTO_ALG_KERN_DRIVER_ONLY,
1609				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1610				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1611				.cra_init = stm32_hash_cra_init,
1612				.cra_exit = stm32_hash_cra_exit,
1613				.cra_module = THIS_MODULE,
1614			}
1615		},
1616		.op = {
1617			.do_one_request = stm32_hash_one_request,
1618		},
1619	},
1620	{
1621		.base.init = stm32_hash_init,
1622		.base.update = stm32_hash_update,
1623		.base.final = stm32_hash_final,
1624		.base.finup = stm32_hash_finup,
1625		.base.digest = stm32_hash_digest,
1626		.base.export = stm32_hash_export,
1627		.base.import = stm32_hash_import,
1628		.base.setkey = stm32_hash_setkey,
1629		.base.halg = {
1630			.digestsize = MD5_DIGEST_SIZE,
1631			.statesize = sizeof(struct stm32_hash_state),
1632			.base = {
1633				.cra_name = "hmac(md5)",
1634				.cra_driver_name = "stm32-hmac-md5",
1635				.cra_priority = 200,
1636				.cra_flags = CRYPTO_ALG_ASYNC |
1637					CRYPTO_ALG_KERN_DRIVER_ONLY,
1638				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
1639				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1640				.cra_init = stm32_hash_cra_hmac_init,
1641				.cra_exit = stm32_hash_cra_exit,
1642				.cra_module = THIS_MODULE,
1643			}
1644		},
1645		.op = {
1646			.do_one_request = stm32_hash_one_request,
1647		},
1648	}
1649};
1650
1651static struct ahash_engine_alg algs_sha1[] = {
1652	{
1653		.base.init = stm32_hash_init,
1654		.base.update = stm32_hash_update,
1655		.base.final = stm32_hash_final,
1656		.base.finup = stm32_hash_finup,
1657		.base.digest = stm32_hash_digest,
1658		.base.export = stm32_hash_export,
1659		.base.import = stm32_hash_import,
1660		.base.halg = {
1661			.digestsize = SHA1_DIGEST_SIZE,
1662			.statesize = sizeof(struct stm32_hash_state),
1663			.base = {
1664				.cra_name = "sha1",
1665				.cra_driver_name = "stm32-sha1",
1666				.cra_priority = 200,
1667				.cra_flags = CRYPTO_ALG_ASYNC |
1668					CRYPTO_ALG_KERN_DRIVER_ONLY,
1669				.cra_blocksize = SHA1_BLOCK_SIZE,
1670				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1671				.cra_init = stm32_hash_cra_init,
1672				.cra_exit = stm32_hash_cra_exit,
1673				.cra_module = THIS_MODULE,
1674			}
1675		},
1676		.op = {
1677			.do_one_request = stm32_hash_one_request,
1678		},
1679	},
1680	{
1681		.base.init = stm32_hash_init,
1682		.base.update = stm32_hash_update,
1683		.base.final = stm32_hash_final,
1684		.base.finup = stm32_hash_finup,
1685		.base.digest = stm32_hash_digest,
1686		.base.export = stm32_hash_export,
1687		.base.import = stm32_hash_import,
1688		.base.setkey = stm32_hash_setkey,
1689		.base.halg = {
1690			.digestsize = SHA1_DIGEST_SIZE,
1691			.statesize = sizeof(struct stm32_hash_state),
1692			.base = {
1693				.cra_name = "hmac(sha1)",
1694				.cra_driver_name = "stm32-hmac-sha1",
1695				.cra_priority = 200,
1696				.cra_flags = CRYPTO_ALG_ASYNC |
1697					CRYPTO_ALG_KERN_DRIVER_ONLY,
1698				.cra_blocksize = SHA1_BLOCK_SIZE,
1699				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1700				.cra_init = stm32_hash_cra_hmac_init,
1701				.cra_exit = stm32_hash_cra_exit,
1702				.cra_module = THIS_MODULE,
1703			}
1704		},
1705		.op = {
1706			.do_one_request = stm32_hash_one_request,
1707		},
1708	},
1709};
1710
1711static struct ahash_engine_alg algs_sha224[] = {
1712	{
1713		.base.init = stm32_hash_init,
1714		.base.update = stm32_hash_update,
1715		.base.final = stm32_hash_final,
1716		.base.finup = stm32_hash_finup,
1717		.base.digest = stm32_hash_digest,
1718		.base.export = stm32_hash_export,
1719		.base.import = stm32_hash_import,
1720		.base.halg = {
1721			.digestsize = SHA224_DIGEST_SIZE,
1722			.statesize = sizeof(struct stm32_hash_state),
1723			.base = {
1724				.cra_name = "sha224",
1725				.cra_driver_name = "stm32-sha224",
1726				.cra_priority = 200,
1727				.cra_flags = CRYPTO_ALG_ASYNC |
1728					CRYPTO_ALG_KERN_DRIVER_ONLY,
1729				.cra_blocksize = SHA224_BLOCK_SIZE,
1730				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1731				.cra_init = stm32_hash_cra_init,
1732				.cra_exit = stm32_hash_cra_exit,
1733				.cra_module = THIS_MODULE,
1734			}
1735		},
1736		.op = {
1737			.do_one_request = stm32_hash_one_request,
1738		},
1739	},
1740	{
1741		.base.init = stm32_hash_init,
1742		.base.update = stm32_hash_update,
1743		.base.final = stm32_hash_final,
1744		.base.finup = stm32_hash_finup,
1745		.base.digest = stm32_hash_digest,
1746		.base.setkey = stm32_hash_setkey,
1747		.base.export = stm32_hash_export,
1748		.base.import = stm32_hash_import,
1749		.base.halg = {
1750			.digestsize = SHA224_DIGEST_SIZE,
1751			.statesize = sizeof(struct stm32_hash_state),
1752			.base = {
1753				.cra_name = "hmac(sha224)",
1754				.cra_driver_name = "stm32-hmac-sha224",
1755				.cra_priority = 200,
1756				.cra_flags = CRYPTO_ALG_ASYNC |
1757					CRYPTO_ALG_KERN_DRIVER_ONLY,
1758				.cra_blocksize = SHA224_BLOCK_SIZE,
1759				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1760				.cra_init = stm32_hash_cra_hmac_init,
1761				.cra_exit = stm32_hash_cra_exit,
1762				.cra_module = THIS_MODULE,
1763			}
1764		},
1765		.op = {
1766			.do_one_request = stm32_hash_one_request,
1767		},
1768	},
1769};
1770
1771static struct ahash_engine_alg algs_sha256[] = {
1772	{
1773		.base.init = stm32_hash_init,
1774		.base.update = stm32_hash_update,
1775		.base.final = stm32_hash_final,
1776		.base.finup = stm32_hash_finup,
1777		.base.digest = stm32_hash_digest,
1778		.base.export = stm32_hash_export,
1779		.base.import = stm32_hash_import,
1780		.base.halg = {
1781			.digestsize = SHA256_DIGEST_SIZE,
1782			.statesize = sizeof(struct stm32_hash_state),
1783			.base = {
1784				.cra_name = "sha256",
1785				.cra_driver_name = "stm32-sha256",
1786				.cra_priority = 200,
1787				.cra_flags = CRYPTO_ALG_ASYNC |
1788					CRYPTO_ALG_KERN_DRIVER_ONLY,
1789				.cra_blocksize = SHA256_BLOCK_SIZE,
1790				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1791				.cra_init = stm32_hash_cra_init,
1792				.cra_exit = stm32_hash_cra_exit,
1793				.cra_module = THIS_MODULE,
1794			}
1795		},
1796		.op = {
1797			.do_one_request = stm32_hash_one_request,
1798		},
1799	},
1800	{
1801		.base.init = stm32_hash_init,
1802		.base.update = stm32_hash_update,
1803		.base.final = stm32_hash_final,
1804		.base.finup = stm32_hash_finup,
1805		.base.digest = stm32_hash_digest,
1806		.base.export = stm32_hash_export,
1807		.base.import = stm32_hash_import,
1808		.base.setkey = stm32_hash_setkey,
1809		.base.halg = {
1810			.digestsize = SHA256_DIGEST_SIZE,
1811			.statesize = sizeof(struct stm32_hash_state),
1812			.base = {
1813				.cra_name = "hmac(sha256)",
1814				.cra_driver_name = "stm32-hmac-sha256",
1815				.cra_priority = 200,
1816				.cra_flags = CRYPTO_ALG_ASYNC |
1817					CRYPTO_ALG_KERN_DRIVER_ONLY,
1818				.cra_blocksize = SHA256_BLOCK_SIZE,
1819				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1820				.cra_init = stm32_hash_cra_hmac_init,
1821				.cra_exit = stm32_hash_cra_exit,
1822				.cra_module = THIS_MODULE,
1823			}
1824		},
1825		.op = {
1826			.do_one_request = stm32_hash_one_request,
1827		},
1828	},
1829};
1830
1831static struct ahash_engine_alg algs_sha384_sha512[] = {
1832	{
1833		.base.init = stm32_hash_init,
1834		.base.update = stm32_hash_update,
1835		.base.final = stm32_hash_final,
1836		.base.finup = stm32_hash_finup,
1837		.base.digest = stm32_hash_digest,
1838		.base.export = stm32_hash_export,
1839		.base.import = stm32_hash_import,
1840		.base.halg = {
1841			.digestsize = SHA384_DIGEST_SIZE,
1842			.statesize = sizeof(struct stm32_hash_state),
1843			.base = {
1844				.cra_name = "sha384",
1845				.cra_driver_name = "stm32-sha384",
1846				.cra_priority = 200,
1847				.cra_flags = CRYPTO_ALG_ASYNC |
1848					CRYPTO_ALG_KERN_DRIVER_ONLY,
1849				.cra_blocksize = SHA384_BLOCK_SIZE,
1850				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1851				.cra_init = stm32_hash_cra_init,
1852				.cra_exit = stm32_hash_cra_exit,
1853				.cra_module = THIS_MODULE,
1854			}
1855		},
1856		.op = {
1857			.do_one_request = stm32_hash_one_request,
1858		},
1859	},
1860	{
1861		.base.init = stm32_hash_init,
1862		.base.update = stm32_hash_update,
1863		.base.final = stm32_hash_final,
1864		.base.finup = stm32_hash_finup,
1865		.base.digest = stm32_hash_digest,
1866		.base.setkey = stm32_hash_setkey,
1867		.base.export = stm32_hash_export,
1868		.base.import = stm32_hash_import,
1869		.base.halg = {
1870			.digestsize = SHA384_DIGEST_SIZE,
1871			.statesize = sizeof(struct stm32_hash_state),
1872			.base = {
1873				.cra_name = "hmac(sha384)",
1874				.cra_driver_name = "stm32-hmac-sha384",
1875				.cra_priority = 200,
1876				.cra_flags = CRYPTO_ALG_ASYNC |
1877					CRYPTO_ALG_KERN_DRIVER_ONLY,
1878				.cra_blocksize = SHA384_BLOCK_SIZE,
1879				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1880				.cra_init = stm32_hash_cra_hmac_init,
1881				.cra_exit = stm32_hash_cra_exit,
1882				.cra_module = THIS_MODULE,
1883			}
1884		},
1885		.op = {
1886			.do_one_request = stm32_hash_one_request,
1887		},
1888	},
1889	{
1890		.base.init = stm32_hash_init,
1891		.base.update = stm32_hash_update,
1892		.base.final = stm32_hash_final,
1893		.base.finup = stm32_hash_finup,
1894		.base.digest = stm32_hash_digest,
1895		.base.export = stm32_hash_export,
1896		.base.import = stm32_hash_import,
1897		.base.halg = {
1898			.digestsize = SHA512_DIGEST_SIZE,
1899			.statesize = sizeof(struct stm32_hash_state),
1900			.base = {
1901				.cra_name = "sha512",
1902				.cra_driver_name = "stm32-sha512",
1903				.cra_priority = 200,
1904				.cra_flags = CRYPTO_ALG_ASYNC |
1905					CRYPTO_ALG_KERN_DRIVER_ONLY,
1906				.cra_blocksize = SHA512_BLOCK_SIZE,
1907				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1908				.cra_init = stm32_hash_cra_init,
1909				.cra_exit = stm32_hash_cra_exit,
1910				.cra_module = THIS_MODULE,
1911			}
1912		},
1913		.op = {
1914			.do_one_request = stm32_hash_one_request,
1915		},
1916	},
1917	{
1918		.base.init = stm32_hash_init,
1919		.base.update = stm32_hash_update,
1920		.base.final = stm32_hash_final,
1921		.base.finup = stm32_hash_finup,
1922		.base.digest = stm32_hash_digest,
1923		.base.export = stm32_hash_export,
1924		.base.import = stm32_hash_import,
1925		.base.setkey = stm32_hash_setkey,
1926		.base.halg = {
1927			.digestsize = SHA512_DIGEST_SIZE,
1928			.statesize = sizeof(struct stm32_hash_state),
1929			.base = {
1930				.cra_name = "hmac(sha512)",
1931				.cra_driver_name = "stm32-hmac-sha512",
1932				.cra_priority = 200,
1933				.cra_flags = CRYPTO_ALG_ASYNC |
1934					CRYPTO_ALG_KERN_DRIVER_ONLY,
1935				.cra_blocksize = SHA512_BLOCK_SIZE,
1936				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1937				.cra_init = stm32_hash_cra_hmac_init,
1938				.cra_exit = stm32_hash_cra_exit,
1939				.cra_module = THIS_MODULE,
1940			}
1941		},
1942		.op = {
1943			.do_one_request = stm32_hash_one_request,
1944		},
1945	},
1946};
1947
1948static struct ahash_engine_alg algs_sha3[] = {
1949	{
1950		.base.init = stm32_hash_init,
1951		.base.update = stm32_hash_update,
1952		.base.final = stm32_hash_final,
1953		.base.finup = stm32_hash_finup,
1954		.base.digest = stm32_hash_digest,
1955		.base.export = stm32_hash_export,
1956		.base.import = stm32_hash_import,
1957		.base.halg = {
1958			.digestsize = SHA3_224_DIGEST_SIZE,
1959			.statesize = sizeof(struct stm32_hash_state),
1960			.base = {
1961				.cra_name = "sha3-224",
1962				.cra_driver_name = "stm32-sha3-224",
1963				.cra_priority = 200,
1964				.cra_flags = CRYPTO_ALG_ASYNC |
1965					CRYPTO_ALG_KERN_DRIVER_ONLY,
1966				.cra_blocksize = SHA3_224_BLOCK_SIZE,
1967				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1968				.cra_init = stm32_hash_cra_sha3_init,
1969				.cra_exit = stm32_hash_cra_exit,
1970				.cra_module = THIS_MODULE,
1971			}
1972		},
1973		.op = {
1974			.do_one_request = stm32_hash_one_request,
1975		},
1976	},
1977	{
1978		.base.init = stm32_hash_init,
1979		.base.update = stm32_hash_update,
1980		.base.final = stm32_hash_final,
1981		.base.finup = stm32_hash_finup,
1982		.base.digest = stm32_hash_digest,
1983		.base.export = stm32_hash_export,
1984		.base.import = stm32_hash_import,
1985		.base.setkey = stm32_hash_setkey,
1986		.base.halg = {
1987			.digestsize = SHA3_224_DIGEST_SIZE,
1988			.statesize = sizeof(struct stm32_hash_state),
1989			.base = {
1990				.cra_name = "hmac(sha3-224)",
1991				.cra_driver_name = "stm32-hmac-sha3-224",
1992				.cra_priority = 200,
1993				.cra_flags = CRYPTO_ALG_ASYNC |
1994					CRYPTO_ALG_KERN_DRIVER_ONLY,
1995				.cra_blocksize = SHA3_224_BLOCK_SIZE,
1996				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
1997				.cra_init = stm32_hash_cra_sha3_hmac_init,
1998				.cra_exit = stm32_hash_cra_exit,
1999				.cra_module = THIS_MODULE,
2000			}
2001		},
2002		.op = {
2003			.do_one_request = stm32_hash_one_request,
2004		},
2005	},
2006	{
2007		.base.init = stm32_hash_init,
2008		.base.update = stm32_hash_update,
2009		.base.final = stm32_hash_final,
2010		.base.finup = stm32_hash_finup,
2011		.base.digest = stm32_hash_digest,
2012		.base.export = stm32_hash_export,
2013		.base.import = stm32_hash_import,
2014		.base.halg = {
2015			.digestsize = SHA3_256_DIGEST_SIZE,
2016			.statesize = sizeof(struct stm32_hash_state),
2017			.base = {
2018				.cra_name = "sha3-256",
2019				.cra_driver_name = "stm32-sha3-256",
2020				.cra_priority = 200,
2021				.cra_flags = CRYPTO_ALG_ASYNC |
2022					CRYPTO_ALG_KERN_DRIVER_ONLY,
2023				.cra_blocksize = SHA3_256_BLOCK_SIZE,
2024				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2025				.cra_init = stm32_hash_cra_sha3_init,
2026				.cra_exit = stm32_hash_cra_exit,
2027				.cra_module = THIS_MODULE,
2028			}
2029		},
2030		.op = {
2031			.do_one_request = stm32_hash_one_request,
2032		},
2033	},
2034	{
2035		.base.init = stm32_hash_init,
2036		.base.update = stm32_hash_update,
2037		.base.final = stm32_hash_final,
2038		.base.finup = stm32_hash_finup,
2039		.base.digest = stm32_hash_digest,
2040		.base.export = stm32_hash_export,
2041		.base.import = stm32_hash_import,
2042		.base.setkey = stm32_hash_setkey,
2043		.base.halg = {
2044			.digestsize = SHA3_256_DIGEST_SIZE,
2045			.statesize = sizeof(struct stm32_hash_state),
2046			.base = {
2047				.cra_name = "hmac(sha3-256)",
2048				.cra_driver_name = "stm32-hmac-sha3-256",
2049				.cra_priority = 200,
2050				.cra_flags = CRYPTO_ALG_ASYNC |
2051					CRYPTO_ALG_KERN_DRIVER_ONLY,
2052				.cra_blocksize = SHA3_256_BLOCK_SIZE,
2053				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2054				.cra_init = stm32_hash_cra_sha3_hmac_init,
2055				.cra_exit = stm32_hash_cra_exit,
2056				.cra_module = THIS_MODULE,
2057			}
2058		},
2059		.op = {
2060			.do_one_request = stm32_hash_one_request,
2061		},
2062	},
2063	{
2064		.base.init = stm32_hash_init,
2065		.base.update = stm32_hash_update,
2066		.base.final = stm32_hash_final,
2067		.base.finup = stm32_hash_finup,
2068		.base.digest = stm32_hash_digest,
2069		.base.export = stm32_hash_export,
2070		.base.import = stm32_hash_import,
2071		.base.halg = {
2072			.digestsize = SHA3_384_DIGEST_SIZE,
2073			.statesize = sizeof(struct stm32_hash_state),
2074			.base = {
2075				.cra_name = "sha3-384",
2076				.cra_driver_name = "stm32-sha3-384",
2077				.cra_priority = 200,
2078				.cra_flags = CRYPTO_ALG_ASYNC |
2079					CRYPTO_ALG_KERN_DRIVER_ONLY,
2080				.cra_blocksize = SHA3_384_BLOCK_SIZE,
2081				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2082				.cra_init = stm32_hash_cra_sha3_init,
2083				.cra_exit = stm32_hash_cra_exit,
2084				.cra_module = THIS_MODULE,
2085			}
2086		},
2087		.op = {
2088			.do_one_request = stm32_hash_one_request,
2089		},
2090	},
2091	{
2092		.base.init = stm32_hash_init,
2093		.base.update = stm32_hash_update,
2094		.base.final = stm32_hash_final,
2095		.base.finup = stm32_hash_finup,
2096		.base.digest = stm32_hash_digest,
2097		.base.export = stm32_hash_export,
2098		.base.import = stm32_hash_import,
2099		.base.setkey = stm32_hash_setkey,
2100		.base.halg = {
2101			.digestsize = SHA3_384_DIGEST_SIZE,
2102			.statesize = sizeof(struct stm32_hash_state),
2103			.base = {
2104				.cra_name = "hmac(sha3-384)",
2105				.cra_driver_name = "stm32-hmac-sha3-384",
2106				.cra_priority = 200,
2107				.cra_flags = CRYPTO_ALG_ASYNC |
2108					CRYPTO_ALG_KERN_DRIVER_ONLY,
2109				.cra_blocksize = SHA3_384_BLOCK_SIZE,
2110				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2111				.cra_init = stm32_hash_cra_sha3_hmac_init,
2112				.cra_exit = stm32_hash_cra_exit,
2113				.cra_module = THIS_MODULE,
2114			}
2115		},
2116		.op = {
2117			.do_one_request = stm32_hash_one_request,
2118		},
2119	},
2120	{
2121		.base.init = stm32_hash_init,
2122		.base.update = stm32_hash_update,
2123		.base.final = stm32_hash_final,
2124		.base.finup = stm32_hash_finup,
2125		.base.digest = stm32_hash_digest,
2126		.base.export = stm32_hash_export,
2127		.base.import = stm32_hash_import,
2128		.base.halg = {
2129			.digestsize = SHA3_512_DIGEST_SIZE,
2130			.statesize = sizeof(struct stm32_hash_state),
2131			.base = {
2132				.cra_name = "sha3-512",
2133				.cra_driver_name = "stm32-sha3-512",
2134				.cra_priority = 200,
2135				.cra_flags = CRYPTO_ALG_ASYNC |
2136					CRYPTO_ALG_KERN_DRIVER_ONLY,
2137				.cra_blocksize = SHA3_512_BLOCK_SIZE,
2138				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2139				.cra_init = stm32_hash_cra_sha3_init,
2140				.cra_exit = stm32_hash_cra_exit,
2141				.cra_module = THIS_MODULE,
2142			}
2143		},
2144		.op = {
2145			.do_one_request = stm32_hash_one_request,
2146		},
2147	},
2148	{
2149		.base.init = stm32_hash_init,
2150		.base.update = stm32_hash_update,
2151		.base.final = stm32_hash_final,
2152		.base.finup = stm32_hash_finup,
2153		.base.digest = stm32_hash_digest,
2154		.base.export = stm32_hash_export,
2155		.base.import = stm32_hash_import,
2156		.base.setkey = stm32_hash_setkey,
2157		.base.halg = {
2158			.digestsize = SHA3_512_DIGEST_SIZE,
2159			.statesize = sizeof(struct stm32_hash_state),
2160			.base = {
2161				.cra_name = "hmac(sha3-512)",
2162				.cra_driver_name = "stm32-hmac-sha3-512",
2163				.cra_priority = 200,
2164				.cra_flags = CRYPTO_ALG_ASYNC |
2165					CRYPTO_ALG_KERN_DRIVER_ONLY,
2166				.cra_blocksize = SHA3_512_BLOCK_SIZE,
2167				.cra_ctxsize = sizeof(struct stm32_hash_ctx),
2168				.cra_init = stm32_hash_cra_sha3_hmac_init,
2169				.cra_exit = stm32_hash_cra_exit,
2170				.cra_module = THIS_MODULE,
2171			}
2172		},
2173		.op = {
2174			.do_one_request = stm32_hash_one_request,
2175		},
2176	}
2177};
2178
2179static int stm32_hash_register_algs(struct stm32_hash_dev *hdev)
2180{
2181	unsigned int i, j;
2182	int err;
2183
2184	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
2185		for (j = 0; j < hdev->pdata->algs_info[i].size; j++) {
2186			err = crypto_engine_register_ahash(
2187				&hdev->pdata->algs_info[i].algs_list[j]);
2188			if (err)
2189				goto err_algs;
2190		}
2191	}
2192
2193	return 0;
2194err_algs:
2195	dev_err(hdev->dev, "Algo %d: %d failed\n", i, j);
2196	for (; i--; ) {
2197		for (; j--;)
2198			crypto_engine_unregister_ahash(
2199				&hdev->pdata->algs_info[i].algs_list[j]);
2200	}
2201
2202	return err;
2203}
2204
2205static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
2206{
2207	unsigned int i, j;
2208
2209	for (i = 0; i < hdev->pdata->algs_info_size; i++) {
2210		for (j = 0; j < hdev->pdata->algs_info[i].size; j++)
2211			crypto_engine_unregister_ahash(
2212				&hdev->pdata->algs_info[i].algs_list[j]);
2213	}
2214
2215	return 0;
2216}
2217
static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
	.alg_shift	= 7,
	.algs_info	= stm32_hash_algs_info_ux500,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_ux500),
	.broken_emptymsg = true,
	.ux500		= true,
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
	.alg_shift	= 7,
	.algs_info	= stm32_hash_algs_info_stm32f4,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
	.has_sr		= true,
	.has_mdmat	= true,
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
	{
		.algs_list	= algs_md5,
		.size		= ARRAY_SIZE(algs_md5),
	},
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha224,
		.size		= ARRAY_SIZE(algs_sha224),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
	.alg_shift	= 7,
	.algs_info	= stm32_hash_algs_info_stm32f7,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
	.has_sr		= true,
	.has_mdmat	= true,
};

static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = {
	{
		.algs_list	= algs_sha1,
		.size		= ARRAY_SIZE(algs_sha1),
	},
	{
		.algs_list	= algs_sha224,
		.size		= ARRAY_SIZE(algs_sha224),
	},
	{
		.algs_list	= algs_sha256,
		.size		= ARRAY_SIZE(algs_sha256),
	},
	{
		.algs_list	= algs_sha384_sha512,
		.size		= ARRAY_SIZE(algs_sha384_sha512),
	},
	{
		.algs_list	= algs_sha3,
		.size		= ARRAY_SIZE(algs_sha3),
	},
};

static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = {
	.alg_shift	= 17,
	.algs_info	= stm32_hash_algs_info_stm32mp13,
	.algs_info_size	= ARRAY_SIZE(stm32_hash_algs_info_stm32mp13),
	.has_sr		= true,
	.has_mdmat	= true,
	.context_secured = true,
};

static const struct of_device_id stm32_hash_of_match[] = {
	{ .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 },
	{ .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 },
	{ .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 },
	{ .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 },
	{},
};

MODULE_DEVICE_TABLE(of, stm32_hash_of_match);

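/* Resolve the per-SoC pdata selected by the matched compatible string. */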
static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev,
				   struct device *dev)
{
	hdev->pdata = of_device_get_match_data(dev);
	if (!hdev->pdata) {
		dev_err(dev, "no compatible OF match\n");
		return -EINVAL;
	}

	return 0;
}

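/*
 * Probe sequence: map the registers, resolve the pdata, request the
 * optional IRQ, enable the clock, set up runtime PM, pulse the optional
 * reset line, probe for DMA, then start a crypto engine and register
 * the algorithms.
 */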
static int stm32_hash_probe(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret, irq;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(hdev->io_base))
		return PTR_ERR(hdev->io_base);

	hdev->phys_base = res->start;

	ret = stm32_hash_get_of_match(hdev, dev);
	if (ret)
		return ret;

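	/* The interrupt is optional; without it the driver falls back to polling. */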
	irq = platform_get_irq_optional(pdev, 0);
	if (irq < 0 && irq != -ENXIO)
		return irq;

	if (irq > 0) {
		ret = devm_request_threaded_irq(dev, irq,
						stm32_hash_irq_handler,
						stm32_hash_irq_thread,
						IRQF_ONESHOT,
						dev_name(dev), hdev);
		if (ret) {
			dev_err(dev, "Cannot grab IRQ\n");
			return ret;
		}
	} else {
		dev_info(dev, "No IRQ, use polling mode\n");
		hdev->polled = true;
	}

	hdev->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(hdev->clk))
		return dev_err_probe(dev, PTR_ERR(hdev->clk),
				     "failed to get clock for hash\n");

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(dev, "failed to enable hash clock (%d)\n", ret);
		return ret;
	}

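	/*
	 * Hold an active runtime PM reference for the rest of probe; the
	 * matching pm_runtime_put_sync() runs once registration succeeds.
	 */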
	pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

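	/* The reset line is optional; when present, pulse it to start from a clean state. */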
	hdev->rst = devm_reset_control_get(dev, NULL);
	if (IS_ERR(hdev->rst)) {
		if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto err_reset;
		}
	} else {
		reset_control_assert(hdev->rst);
		udelay(2);
		reset_control_deassert(hdev->rst);
	}

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

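	/* DMA is optional as well: -ENOENT/-ENODEV just select CPU mode. */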
	ret = stm32_hash_dma_init(hdev);
	switch (ret) {
	case 0:
		break;
	case -ENOENT:
	case -ENODEV:
		dev_info(dev, "DMA mode not available\n");
		break;
	default:
		dev_err(dev, "DMA init error %d\n", ret);
		goto err_dma;
	}

	spin_lock(&stm32_hash.lock);
	list_add_tail(&hdev->list, &stm32_hash.dev_list);
	spin_unlock(&stm32_hash.lock);

	/* Initialize crypto engine */
	hdev->engine = crypto_engine_alloc_init(dev, 1);
	if (!hdev->engine) {
		ret = -ENOMEM;
		goto err_engine;
	}

	ret = crypto_engine_start(hdev->engine);
	if (ret)
		goto err_engine_start;

	if (hdev->pdata->ux500)
		/* FIXME: implement DMA mode for Ux500 */
		hdev->dma_mode = 0;
	else
		hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK;

	/* Register algos */
	ret = stm32_hash_register_algs(hdev);
	if (ret)
		goto err_algs;

	dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n",
		 stm32_hash_read(hdev, HASH_VER), hdev->dma_mode);

	pm_runtime_put_sync(dev);

	return 0;

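	/* Fallthrough error labels: each entry point also runs every cleanup below it. */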
err_algs:
err_engine_start:
	crypto_engine_exit(hdev->engine);
err_engine:
	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);
err_dma:
	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);
err_reset:
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	clk_disable_unprepare(hdev->clk);

	return ret;
}

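/*
 * Removal: resume the device so the hardware can be accessed, then tear
 * down algorithms, engine, DMA channel, runtime PM and finally the clock.
 */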
static void stm32_hash_remove(struct platform_device *pdev)
{
	struct stm32_hash_dev *hdev = platform_get_drvdata(pdev);
	int ret;

	ret = pm_runtime_get_sync(hdev->dev);

	stm32_hash_unregister_algs(hdev);

	crypto_engine_exit(hdev->engine);

	spin_lock(&stm32_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&stm32_hash.lock);

	if (hdev->dma_lch)
		dma_release_channel(hdev->dma_lch);

	pm_runtime_disable(hdev->dev);
	pm_runtime_put_noidle(hdev->dev);

	if (ret >= 0)
		clk_disable_unprepare(hdev->clk);
}

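/* Runtime PM only gates the HASH clock. */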
#ifdef CONFIG_PM
static int stm32_hash_runtime_suspend(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);

	clk_disable_unprepare(hdev->clk);

	return 0;
}

static int stm32_hash_runtime_resume(struct device *dev)
{
	struct stm32_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(hdev->clk);
	if (ret) {
		dev_err(hdev->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_hash_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend,
			   stm32_hash_runtime_resume, NULL)
};

static struct platform_driver stm32_hash_driver = {
	.probe		= stm32_hash_probe,
	.remove		= stm32_hash_remove,
	.driver		= {
		.name	= "stm32-hash",
		.pm = &stm32_hash_pm_ops,
		.of_match_table	= stm32_hash_of_match,
	}
};

module_platform_driver(stm32_hash_driver);

MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver");
MODULE_AUTHOR("Lionel Debieve <lionel.debieve@st.com>");
MODULE_LICENSE("GPL v2");