v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2014 Imagination Technologies
   4 * Authors:  Will Thomas, James Hartley
   5 *
   6 *	Interface structure taken from omap-sham driver
   7 */
   8
   9#include <linux/clk.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/dmaengine.h>
  12#include <linux/interrupt.h>
  13#include <linux/io.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/mod_devicetable.h>
  17#include <linux/platform_device.h>
  18#include <linux/scatterlist.h>
  19
  20#include <crypto/internal/hash.h>
  21#include <crypto/md5.h>
  22#include <crypto/sha1.h>
  23#include <crypto/sha2.h>
  24
  25#define CR_RESET			0
  26#define CR_RESET_SET			1
  27#define CR_RESET_UNSET			0
  28
  29#define CR_MESSAGE_LENGTH_H		0x4
  30#define CR_MESSAGE_LENGTH_L		0x8
  31
  32#define CR_CONTROL			0xc
  33#define CR_CONTROL_BYTE_ORDER_3210	0
  34#define CR_CONTROL_BYTE_ORDER_0123	1
  35#define CR_CONTROL_BYTE_ORDER_2310	2
  36#define CR_CONTROL_BYTE_ORDER_1032	3
  37#define CR_CONTROL_BYTE_ORDER_SHIFT	8
  38#define CR_CONTROL_ALGO_MD5	0
  39#define CR_CONTROL_ALGO_SHA1	1
  40#define CR_CONTROL_ALGO_SHA224	2
  41#define CR_CONTROL_ALGO_SHA256	3
  42
  43#define CR_INTSTAT			0x10
  44#define CR_INTENAB			0x14
  45#define CR_INTCLEAR			0x18
  46#define CR_INT_RESULTS_AVAILABLE	BIT(0)
  47#define CR_INT_NEW_RESULTS_SET		BIT(1)
  48#define CR_INT_RESULT_READ_ERR		BIT(2)
  49#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
  50#define CR_INT_STATUS			BIT(8)
  51
  52#define CR_RESULT_QUEUE		0x1c
  53#define CR_RSD0				0x40
  54#define CR_CORE_REV			0x50
  55#define CR_CORE_DES1		0x60
  56#define CR_CORE_DES2		0x70
  57
  58#define DRIVER_FLAGS_BUSY		BIT(0)
  59#define DRIVER_FLAGS_FINAL		BIT(1)
  60#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
  61#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
  62#define DRIVER_FLAGS_INIT		BIT(4)
  63#define DRIVER_FLAGS_CPU		BIT(5)
  64#define DRIVER_FLAGS_DMA_READY		BIT(6)
  65#define DRIVER_FLAGS_ERROR		BIT(7)
  66#define DRIVER_FLAGS_SG			BIT(8)
  67#define DRIVER_FLAGS_SHA1		BIT(18)
  68#define DRIVER_FLAGS_SHA224		BIT(19)
  69#define DRIVER_FLAGS_SHA256		BIT(20)
  70#define DRIVER_FLAGS_MD5		BIT(21)
  71
  72#define IMG_HASH_QUEUE_LENGTH		20
  73#define IMG_HASH_DMA_BURST		4
  74#define IMG_HASH_DMA_THRESHOLD		64
  75
  76#ifdef __LITTLE_ENDIAN
  77#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
  78#else
  79#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
  80#endif
  81
  82struct img_hash_dev;
  83
  84struct img_hash_request_ctx {
  85	struct img_hash_dev	*hdev;
  86	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
  87	unsigned long		flags;
  88	size_t			digsize;
  89
  90	dma_addr_t		dma_addr;
  91	size_t			dma_ct;
  92
  93	/* sg root */
  94	struct scatterlist	*sgfirst;
  95	/* walk state */
  96	struct scatterlist	*sg;
  97	size_t			nents;
  98	size_t			offset;
  99	unsigned int		total;
 100	size_t			sent;
 101
 102	unsigned long		op;
 103
 104	size_t			bufcnt;
 105	struct ahash_request	fallback_req;
 106
 107	/* Zero length buffer must remain last member of struct */
 108	u8 buffer[] __aligned(sizeof(u32));
 109};
 110
 111struct img_hash_ctx {
 112	struct img_hash_dev	*hdev;
 113	unsigned long		flags;
 114	struct crypto_ahash	*fallback;
 115};
 116
 117struct img_hash_dev {
 118	struct list_head	list;
 119	struct device		*dev;
 120	struct clk		*hash_clk;
 121	struct clk		*sys_clk;
 122	void __iomem		*io_base;
 123
 124	phys_addr_t		bus_addr;
 125	void __iomem		*cpu_addr;
 126
 127	spinlock_t		lock;
 128	int			err;
 129	struct tasklet_struct	done_task;
 130	struct tasklet_struct	dma_task;
 131
 132	unsigned long		flags;
 133	struct crypto_queue	queue;
 134	struct ahash_request	*req;
 135
 136	struct dma_chan		*dma_lch;
 137};
 138
 139struct img_hash_drv {
 140	struct list_head dev_list;
 141	spinlock_t lock;
 142};
 143
 144static struct img_hash_drv img_hash = {
 145	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
 146	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
 147};
 148
 149static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
 150{
 151	return readl_relaxed(hdev->io_base + offset);
 152}
 153
 154static inline void img_hash_write(struct img_hash_dev *hdev,
 155				  u32 offset, u32 value)
 156{
 157	writel_relaxed(value, hdev->io_base + offset);
 158}
 159
 160static inline __be32 img_hash_read_result_queue(struct img_hash_dev *hdev)
 161{
 162	return cpu_to_be32(img_hash_read(hdev, CR_RESULT_QUEUE));
 163}
 164
 165static void img_hash_start(struct img_hash_dev *hdev, bool dma)
 166{
 167	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 168	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;
 169
 170	if (ctx->flags & DRIVER_FLAGS_MD5)
 171		cr |= CR_CONTROL_ALGO_MD5;
 172	else if (ctx->flags & DRIVER_FLAGS_SHA1)
 173		cr |= CR_CONTROL_ALGO_SHA1;
 174	else if (ctx->flags & DRIVER_FLAGS_SHA224)
 175		cr |= CR_CONTROL_ALGO_SHA224;
 176	else if (ctx->flags & DRIVER_FLAGS_SHA256)
 177		cr |= CR_CONTROL_ALGO_SHA256;
 178	dev_dbg(hdev->dev, "Starting hash process\n");
 179	img_hash_write(hdev, CR_CONTROL, cr);
 180
  181	/*
  182	 * The hardware block requires two cycles between writing the control
  183	 * register and writing the first word of data in non-DMA mode. To
  184	 * ensure the first data write is not grouped in a burst with the
  185	 * control register write, a read is issued to 'flush' the bus.
  186	 */
 187	if (!dma)
 188		img_hash_read(hdev, CR_CONTROL);
 189}
 190
 191static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
 192			     size_t length, int final)
 193{
 194	u32 count, len32;
 195	const u32 *buffer = (const u32 *)buf;
 196
 197	dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);
 198
 199	if (final)
 200		hdev->flags |= DRIVER_FLAGS_FINAL;
 201
 202	len32 = DIV_ROUND_UP(length, sizeof(u32));
 203
 204	for (count = 0; count < len32; count++)
 205		writel_relaxed(buffer[count], hdev->cpu_addr);
 206
 207	return -EINPROGRESS;
 208}
 209
 210static void img_hash_dma_callback(void *data)
 211{
 212	struct img_hash_dev *hdev = data;
 213	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 214
 215	if (ctx->bufcnt) {
 216		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
 217		ctx->bufcnt = 0;
 218	}
 219	if (ctx->sg)
 220		tasklet_schedule(&hdev->dma_task);
 221}
 222
 223static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
 224{
 225	struct dma_async_tx_descriptor *desc;
 226	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 227
 228	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 229	if (ctx->dma_ct == 0) {
 230		dev_err(hdev->dev, "Invalid DMA sg\n");
 231		hdev->err = -EINVAL;
 232		return -EINVAL;
 233	}
 234
 235	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
 236				       sg,
 237				       ctx->dma_ct,
 238				       DMA_MEM_TO_DEV,
 239				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 240	if (!desc) {
 241		dev_err(hdev->dev, "Null DMA descriptor\n");
 242		hdev->err = -EINVAL;
 243		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 244		return -EINVAL;
 245	}
 246	desc->callback = img_hash_dma_callback;
 247	desc->callback_param = hdev;
 248	dmaengine_submit(desc);
 249	dma_async_issue_pending(hdev->dma_lch);
 250
 251	return 0;
 252}
 253
 254static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
 255{
 256	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 257
 258	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
 259					ctx->buffer, hdev->req->nbytes);
 260
 261	ctx->total = hdev->req->nbytes;
 262	ctx->bufcnt = 0;
 263
 264	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);
 265
 266	img_hash_start(hdev, false);
 267
 268	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
 269}
 270
 271static int img_hash_finish(struct ahash_request *req)
 272{
 273	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 274
 275	if (!req->result)
 276		return -EINVAL;
 277
 278	memcpy(req->result, ctx->digest, ctx->digsize);
 279
 280	return 0;
 281}
 282
 283static void img_hash_copy_hash(struct ahash_request *req)
 284{
 285	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 286	__be32 *hash = (__be32 *)ctx->digest;
 287	int i;
 288
 289	for (i = (ctx->digsize / sizeof(*hash)) - 1; i >= 0; i--)
 290		hash[i] = img_hash_read_result_queue(ctx->hdev);
 291}
 292
 293static void img_hash_finish_req(struct ahash_request *req, int err)
 294{
 295	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 296	struct img_hash_dev *hdev =  ctx->hdev;
 297
 298	if (!err) {
 299		img_hash_copy_hash(req);
 300		if (DRIVER_FLAGS_FINAL & hdev->flags)
 301			err = img_hash_finish(req);
 302	} else {
 303		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
 304		ctx->flags |= DRIVER_FLAGS_ERROR;
 305	}
 306
 307	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
 308		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
 309
 310	if (req->base.complete)
 311		ahash_request_complete(req, err);
 312}
 313
 314static int img_hash_write_via_dma(struct img_hash_dev *hdev)
 315{
 316	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 317
 318	img_hash_start(hdev, true);
 319
 320	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);
 321
 322	if (!ctx->total)
 323		hdev->flags |= DRIVER_FLAGS_FINAL;
 324
 325	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;
 326
 327	tasklet_schedule(&hdev->dma_task);
 328
 329	return -EINPROGRESS;
 330}
 331
 332static int img_hash_dma_init(struct img_hash_dev *hdev)
 333{
 334	struct dma_slave_config dma_conf;
 335	int err;
 336
 337	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
 338	if (IS_ERR(hdev->dma_lch)) {
 339		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
 340		return PTR_ERR(hdev->dma_lch);
 341	}
 342	dma_conf.direction = DMA_MEM_TO_DEV;
 343	dma_conf.dst_addr = hdev->bus_addr;
 344	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 345	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
 346	dma_conf.device_fc = false;
 347
 348	err = dmaengine_slave_config(hdev->dma_lch,  &dma_conf);
 349	if (err) {
 350		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
 351		dma_release_channel(hdev->dma_lch);
 352		return err;
 353	}
 354
 355	return 0;
 356}
 357
 358static void img_hash_dma_task(unsigned long d)
 359{
 360	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
 361	struct img_hash_request_ctx *ctx;
 362	u8 *addr;
 363	size_t nbytes, bleft, wsend, len, tbc;
 364	struct scatterlist tsg;
 365
 366	if (!hdev->req)
 367		return;
 368
 369	ctx = ahash_request_ctx(hdev->req);
 370	if (!ctx->sg)
 371		return;
 372
 373	addr = sg_virt(ctx->sg);
 374	nbytes = ctx->sg->length - ctx->offset;
 375
 376	/*
 377	 * The hash accelerator does not support a data valid mask. This means
 378	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
 379	 * padding bytes in the last word written by that dma would erroneously
 380	 * be included in the hash. To avoid this we round down the transfer,
 381	 * and add the excess to the start of the next dma. It does not matter
 382	 * that the final dma may not be a multiple of 4 bytes as the hashing
 383	 * block is programmed to accept the correct number of bytes.
 384	 */
 385
 386	bleft = nbytes % 4;
 387	wsend = (nbytes / 4);
 388
 389	if (wsend) {
 390		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
 391		if (img_hash_xmit_dma(hdev, &tsg)) {
 392			dev_err(hdev->dev, "DMA failed, falling back to CPU");
 393			ctx->flags |= DRIVER_FLAGS_CPU;
 394			hdev->err = 0;
 395			img_hash_xmit_cpu(hdev, addr + ctx->offset,
 396					  wsend * 4, 0);
 397			ctx->sent += wsend * 4;
 398			wsend = 0;
 399		} else {
 400			ctx->sent += wsend * 4;
 401		}
 402	}
 403
 404	if (bleft) {
 405		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
 406						 ctx->buffer, bleft, ctx->sent);
 407		tbc = 0;
 408		ctx->sg = sg_next(ctx->sg);
 409		while (ctx->sg && (ctx->bufcnt < 4)) {
 410			len = ctx->sg->length;
 411			if (likely(len > (4 - ctx->bufcnt)))
 412				len = 4 - ctx->bufcnt;
 413			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
 414						 ctx->buffer + ctx->bufcnt, len,
 415					ctx->sent + ctx->bufcnt);
 416			ctx->bufcnt += tbc;
 417			if (tbc >= ctx->sg->length) {
 418				ctx->sg = sg_next(ctx->sg);
 419				tbc = 0;
 420			}
 421		}
 422
 423		ctx->sent += ctx->bufcnt;
 424		ctx->offset = tbc;
 425
 426		if (!wsend)
 427			img_hash_dma_callback(hdev);
 428	} else {
 429		ctx->offset = 0;
 430		ctx->sg = sg_next(ctx->sg);
 431	}
 432}
 433
 434static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
 435{
 436	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 437
 438	if (ctx->flags & DRIVER_FLAGS_SG)
 439		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
 440
 441	return 0;
 442}
 443
 444static int img_hash_process_data(struct img_hash_dev *hdev)
 445{
 446	struct ahash_request *req = hdev->req;
 447	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 448	int err = 0;
 449
 450	ctx->bufcnt = 0;
 451
 452	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
 453		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
 454			req->nbytes);
 455		err = img_hash_write_via_dma(hdev);
 456	} else {
 457		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
 458			req->nbytes);
 459		err = img_hash_write_via_cpu(hdev);
 460	}
 461	return err;
 462}
 463
 464static int img_hash_hw_init(struct img_hash_dev *hdev)
 465{
 466	unsigned long long nbits;
 467	u32 u, l;
 468
 469	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
 470	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
 471	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);
 472
 473	nbits = (u64)hdev->req->nbytes << 3;
 474	u = nbits >> 32;
 475	l = nbits;
 476	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
 477	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);
 478
 479	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
 480		hdev->flags |= DRIVER_FLAGS_INIT;
 481		hdev->err = 0;
 482	}
 483	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
 484	return 0;
 485}
 486
 487static int img_hash_init(struct ahash_request *req)
 488{
 489	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 490	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 491	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 492
 493	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 494	rctx->fallback_req.base.flags =	req->base.flags
 495		& CRYPTO_TFM_REQ_MAY_SLEEP;
 496
 497	return crypto_ahash_init(&rctx->fallback_req);
 498}
 499
 500static int img_hash_handle_queue(struct img_hash_dev *hdev,
 501				 struct ahash_request *req)
 502{
 503	struct crypto_async_request *async_req, *backlog;
 504	struct img_hash_request_ctx *ctx;
 505	unsigned long flags;
 506	int err = 0, res = 0;
 507
 508	spin_lock_irqsave(&hdev->lock, flags);
 509
 510	if (req)
 511		res = ahash_enqueue_request(&hdev->queue, req);
 512
 513	if (DRIVER_FLAGS_BUSY & hdev->flags) {
 514		spin_unlock_irqrestore(&hdev->lock, flags);
 515		return res;
 516	}
 517
 518	backlog = crypto_get_backlog(&hdev->queue);
 519	async_req = crypto_dequeue_request(&hdev->queue);
 520	if (async_req)
 521		hdev->flags |= DRIVER_FLAGS_BUSY;
 522
 523	spin_unlock_irqrestore(&hdev->lock, flags);
 524
 525	if (!async_req)
 526		return res;
 527
 528	if (backlog)
 529		crypto_request_complete(backlog, -EINPROGRESS);
 530
 531	req = ahash_request_cast(async_req);
 532	hdev->req = req;
 533
 534	ctx = ahash_request_ctx(req);
 535
 536	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
 537		 ctx->op, req->nbytes);
 538
 539	err = img_hash_hw_init(hdev);
 540
 541	if (!err)
 542		err = img_hash_process_data(hdev);
 543
 544	if (err != -EINPROGRESS) {
 545		/* done_task will not finish so do it here */
 546		img_hash_finish_req(req, err);
 547	}
 548	return res;
 549}
 550
 551static int img_hash_update(struct ahash_request *req)
 552{
 553	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 554	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 555	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 556
 557	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 558	rctx->fallback_req.base.flags = req->base.flags
 559		& CRYPTO_TFM_REQ_MAY_SLEEP;
 560	rctx->fallback_req.nbytes = req->nbytes;
 561	rctx->fallback_req.src = req->src;
 562
 563	return crypto_ahash_update(&rctx->fallback_req);
 564}
 565
 566static int img_hash_final(struct ahash_request *req)
 567{
 568	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 569	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 570	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 571
 572	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 573	rctx->fallback_req.base.flags = req->base.flags
 574		& CRYPTO_TFM_REQ_MAY_SLEEP;
 575	rctx->fallback_req.result = req->result;
 576
 577	return crypto_ahash_final(&rctx->fallback_req);
 578}
 579
 580static int img_hash_finup(struct ahash_request *req)
 581{
 582	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 583	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 584	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 585
 586	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 587	rctx->fallback_req.base.flags = req->base.flags
 588		& CRYPTO_TFM_REQ_MAY_SLEEP;
 589	rctx->fallback_req.nbytes = req->nbytes;
 590	rctx->fallback_req.src = req->src;
 591	rctx->fallback_req.result = req->result;
 592
 593	return crypto_ahash_finup(&rctx->fallback_req);
 594}
 595
 596static int img_hash_import(struct ahash_request *req, const void *in)
 597{
 598	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 599	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 600	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 601
 602	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 603	rctx->fallback_req.base.flags = req->base.flags
 604		& CRYPTO_TFM_REQ_MAY_SLEEP;
 605
 606	return crypto_ahash_import(&rctx->fallback_req, in);
 607}
 608
 609static int img_hash_export(struct ahash_request *req, void *out)
 610{
 611	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 612	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 613	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 614
 615	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 616	rctx->fallback_req.base.flags = req->base.flags
 617		& CRYPTO_TFM_REQ_MAY_SLEEP;
 618
 619	return crypto_ahash_export(&rctx->fallback_req, out);
 620}
 621
 622static int img_hash_digest(struct ahash_request *req)
 623{
 624	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 625	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
 626	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 627	struct img_hash_dev *hdev = NULL;
 628	struct img_hash_dev *tmp;
 629	int err;
 630
 631	spin_lock(&img_hash.lock);
 632	if (!tctx->hdev) {
 633		list_for_each_entry(tmp, &img_hash.dev_list, list) {
 634			hdev = tmp;
 635			break;
 636		}
 637		tctx->hdev = hdev;
 638
 639	} else {
 640		hdev = tctx->hdev;
 641	}
 642
 643	spin_unlock(&img_hash.lock);
 644	ctx->hdev = hdev;
 645	ctx->flags = 0;
 646	ctx->digsize = crypto_ahash_digestsize(tfm);
 647
 648	switch (ctx->digsize) {
 649	case SHA1_DIGEST_SIZE:
 650		ctx->flags |= DRIVER_FLAGS_SHA1;
 651		break;
 652	case SHA256_DIGEST_SIZE:
 653		ctx->flags |= DRIVER_FLAGS_SHA256;
 654		break;
 655	case SHA224_DIGEST_SIZE:
 656		ctx->flags |= DRIVER_FLAGS_SHA224;
 657		break;
 658	case MD5_DIGEST_SIZE:
 659		ctx->flags |= DRIVER_FLAGS_MD5;
 660		break;
 661	default:
 662		return -EINVAL;
 663	}
 664
 665	ctx->bufcnt = 0;
 666	ctx->offset = 0;
 667	ctx->sent = 0;
 668	ctx->total = req->nbytes;
 669	ctx->sg = req->src;
 670	ctx->sgfirst = req->src;
 671	ctx->nents = sg_nents(ctx->sg);
 672
 673	err = img_hash_handle_queue(tctx->hdev, req);
 674
 675	return err;
 676}
 677
 678static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
 679{
 680	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 681
 682	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
 683					   CRYPTO_ALG_NEED_FALLBACK);
 684	if (IS_ERR(ctx->fallback)) {
 685		pr_err("img_hash: Could not load fallback driver.\n");
 686		return PTR_ERR(ctx->fallback);
 687	}
 688	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 689				 sizeof(struct img_hash_request_ctx) +
 690				 crypto_ahash_reqsize(ctx->fallback) +
 691				 IMG_HASH_DMA_THRESHOLD);
 692
 693	return 0;
 694}
 695
 696static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
 697{
 698	return img_hash_cra_init(tfm, "md5-generic");
 699}
 700
 701static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
 702{
 703	return img_hash_cra_init(tfm, "sha1-generic");
 704}
 705
 706static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
 707{
 708	return img_hash_cra_init(tfm, "sha224-generic");
 709}
 710
 711static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
 712{
 713	return img_hash_cra_init(tfm, "sha256-generic");
 714}
 715
 716static void img_hash_cra_exit(struct crypto_tfm *tfm)
 717{
 718	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
 719
 720	crypto_free_ahash(tctx->fallback);
 721}
 722
 723static irqreturn_t img_irq_handler(int irq, void *dev_id)
 724{
 725	struct img_hash_dev *hdev = dev_id;
 726	u32 reg;
 727
 728	reg = img_hash_read(hdev, CR_INTSTAT);
 729	img_hash_write(hdev, CR_INTCLEAR, reg);
 730
 731	if (reg & CR_INT_NEW_RESULTS_SET) {
 732		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
 733		if (DRIVER_FLAGS_BUSY & hdev->flags) {
 734			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
 735			if (!(DRIVER_FLAGS_CPU & hdev->flags))
 736				hdev->flags |= DRIVER_FLAGS_DMA_READY;
 737			tasklet_schedule(&hdev->done_task);
 738		} else {
 739			dev_warn(hdev->dev,
 740				 "HASH interrupt when no active requests.\n");
 741		}
 742	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
 743		dev_warn(hdev->dev,
 744			 "IRQ triggered before the hash had completed\n");
 745	} else if (reg & CR_INT_RESULT_READ_ERR) {
 746		dev_warn(hdev->dev,
 747			 "Attempt to read from an empty result queue\n");
 748	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
 749		dev_warn(hdev->dev,
 750			 "Data written before the hardware was configured\n");
 751	}
 752	return IRQ_HANDLED;
 753}
 754
 755static struct ahash_alg img_algs[] = {
 756	{
 757		.init = img_hash_init,
 758		.update = img_hash_update,
 759		.final = img_hash_final,
 760		.finup = img_hash_finup,
 761		.export = img_hash_export,
 762		.import = img_hash_import,
 763		.digest = img_hash_digest,
 764		.halg = {
 765			.digestsize = MD5_DIGEST_SIZE,
 766			.statesize = sizeof(struct md5_state),
 767			.base = {
 768				.cra_name = "md5",
 769				.cra_driver_name = "img-md5",
 770				.cra_priority = 300,
 771				.cra_flags =
 772				CRYPTO_ALG_ASYNC |
 773				CRYPTO_ALG_NEED_FALLBACK,
 774				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 775				.cra_ctxsize = sizeof(struct img_hash_ctx),
 776				.cra_init = img_hash_cra_md5_init,
 777				.cra_exit = img_hash_cra_exit,
 778				.cra_module = THIS_MODULE,
 779			}
 780		}
 781	},
 782	{
 783		.init = img_hash_init,
 784		.update = img_hash_update,
 785		.final = img_hash_final,
 786		.finup = img_hash_finup,
 787		.export = img_hash_export,
 788		.import = img_hash_import,
 789		.digest = img_hash_digest,
 790		.halg = {
 791			.digestsize = SHA1_DIGEST_SIZE,
 792			.statesize = sizeof(struct sha1_state),
 793			.base = {
 794				.cra_name = "sha1",
 795				.cra_driver_name = "img-sha1",
 796				.cra_priority = 300,
 797				.cra_flags =
 798				CRYPTO_ALG_ASYNC |
 799				CRYPTO_ALG_NEED_FALLBACK,
 800				.cra_blocksize = SHA1_BLOCK_SIZE,
 801				.cra_ctxsize = sizeof(struct img_hash_ctx),
 802				.cra_init = img_hash_cra_sha1_init,
 803				.cra_exit = img_hash_cra_exit,
 804				.cra_module = THIS_MODULE,
 805			}
 806		}
 807	},
 808	{
 809		.init = img_hash_init,
 810		.update = img_hash_update,
 811		.final = img_hash_final,
 812		.finup = img_hash_finup,
 813		.export = img_hash_export,
 814		.import = img_hash_import,
 815		.digest = img_hash_digest,
 816		.halg = {
 817			.digestsize = SHA224_DIGEST_SIZE,
 818			.statesize = sizeof(struct sha256_state),
 819			.base = {
 820				.cra_name = "sha224",
 821				.cra_driver_name = "img-sha224",
 822				.cra_priority = 300,
 823				.cra_flags =
 824				CRYPTO_ALG_ASYNC |
 825				CRYPTO_ALG_NEED_FALLBACK,
 826				.cra_blocksize = SHA224_BLOCK_SIZE,
 827				.cra_ctxsize = sizeof(struct img_hash_ctx),
 828				.cra_init = img_hash_cra_sha224_init,
 829				.cra_exit = img_hash_cra_exit,
 830				.cra_module = THIS_MODULE,
 831			}
 832		}
 833	},
 834	{
 835		.init = img_hash_init,
 836		.update = img_hash_update,
 837		.final = img_hash_final,
 838		.finup = img_hash_finup,
 839		.export = img_hash_export,
 840		.import = img_hash_import,
 841		.digest = img_hash_digest,
 842		.halg = {
 843			.digestsize = SHA256_DIGEST_SIZE,
 844			.statesize = sizeof(struct sha256_state),
 845			.base = {
 846				.cra_name = "sha256",
 847				.cra_driver_name = "img-sha256",
 848				.cra_priority = 300,
 849				.cra_flags =
 850				CRYPTO_ALG_ASYNC |
 851				CRYPTO_ALG_NEED_FALLBACK,
 852				.cra_blocksize = SHA256_BLOCK_SIZE,
 853				.cra_ctxsize = sizeof(struct img_hash_ctx),
 854				.cra_init = img_hash_cra_sha256_init,
 855				.cra_exit = img_hash_cra_exit,
 856				.cra_module = THIS_MODULE,
 857			}
 858		}
 859	}
 860};
 861
 862static int img_register_algs(struct img_hash_dev *hdev)
 863{
 864	int i, err;
 865
 866	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
 867		err = crypto_register_ahash(&img_algs[i]);
 868		if (err)
 869			goto err_reg;
 870	}
 871	return 0;
 872
 873err_reg:
 874	for (; i--; )
 875		crypto_unregister_ahash(&img_algs[i]);
 876
 877	return err;
 878}
 879
 880static int img_unregister_algs(struct img_hash_dev *hdev)
 881{
 882	int i;
 883
 884	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
 885		crypto_unregister_ahash(&img_algs[i]);
 886	return 0;
 887}
 888
 889static void img_hash_done_task(unsigned long data)
 890{
 891	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
 892	int err = 0;
 893
 894	if (hdev->err == -EINVAL) {
 895		err = hdev->err;
 896		goto finish;
 897	}
 898
 899	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
 900		img_hash_handle_queue(hdev, NULL);
 901		return;
 902	}
 903
 904	if (DRIVER_FLAGS_CPU & hdev->flags) {
 905		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
 906			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
 907			goto finish;
 908		}
 909	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
 910		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
 911			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
 912			img_hash_write_via_dma_stop(hdev);
 913			if (hdev->err) {
 914				err = hdev->err;
 915				goto finish;
 916			}
 917		}
 918		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
 919			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
 920					DRIVER_FLAGS_OUTPUT_READY);
 921			goto finish;
 922		}
 923	}
 924	return;
 925
 926finish:
 927	img_hash_finish_req(hdev->req, err);
 928}
 929
 930static const struct of_device_id img_hash_match[] __maybe_unused = {
 931	{ .compatible = "img,hash-accelerator" },
 932	{}
 933};
 934MODULE_DEVICE_TABLE(of, img_hash_match);
 935
 936static int img_hash_probe(struct platform_device *pdev)
 937{
 938	struct img_hash_dev *hdev;
 939	struct device *dev = &pdev->dev;
 940	struct resource *hash_res;
 941	int	irq;
 942	int err;
 943
 944	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
 945	if (hdev == NULL)
 946		return -ENOMEM;
 947
 948	spin_lock_init(&hdev->lock);
 949
 950	hdev->dev = dev;
 951
 952	platform_set_drvdata(pdev, hdev);
 953
 954	INIT_LIST_HEAD(&hdev->list);
 955
 956	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
 957	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);
 958
 959	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);
 960
 961	/* Register bank */
 962	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
 963	if (IS_ERR(hdev->io_base)) {
 964		err = PTR_ERR(hdev->io_base);
 965		goto res_err;
 966	}
 967
 968	/* Write port (DMA or CPU) */
 969	hdev->cpu_addr = devm_platform_get_and_ioremap_resource(pdev, 1, &hash_res);
 970	if (IS_ERR(hdev->cpu_addr)) {
 971		err = PTR_ERR(hdev->cpu_addr);
 972		goto res_err;
 973	}
 974	hdev->bus_addr = hash_res->start;
 975
 976	irq = platform_get_irq(pdev, 0);
 977	if (irq < 0) {
 978		err = irq;
 979		goto res_err;
 980	}
 981
 982	err = devm_request_irq(dev, irq, img_irq_handler, 0,
 983			       dev_name(dev), hdev);
 984	if (err) {
 985		dev_err(dev, "unable to request irq\n");
 986		goto res_err;
 987	}
 988	dev_dbg(dev, "using IRQ channel %d\n", irq);
 989
 990	hdev->hash_clk = devm_clk_get_enabled(&pdev->dev, "hash");
 991	if (IS_ERR(hdev->hash_clk)) {
 992		dev_err(dev, "clock initialization failed.\n");
 993		err = PTR_ERR(hdev->hash_clk);
 994		goto res_err;
 995	}
 996
 997	hdev->sys_clk = devm_clk_get_enabled(&pdev->dev, "sys");
 998	if (IS_ERR(hdev->sys_clk)) {
 999		dev_err(dev, "clock initialization failed.\n");
1000		err = PTR_ERR(hdev->sys_clk);
1001		goto res_err;
1002	}
1003
1004	err = img_hash_dma_init(hdev);
1005	if (err)
1006		goto res_err;
1007
1008	dev_dbg(dev, "using %s for DMA transfers\n",
1009		dma_chan_name(hdev->dma_lch));
1010
1011	spin_lock(&img_hash.lock);
1012	list_add_tail(&hdev->list, &img_hash.dev_list);
1013	spin_unlock(&img_hash.lock);
1014
1015	err = img_register_algs(hdev);
1016	if (err)
1017		goto err_algs;
1018	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
1019
1020	return 0;
1021
1022err_algs:
1023	spin_lock(&img_hash.lock);
1024	list_del(&hdev->list);
1025	spin_unlock(&img_hash.lock);
1026	dma_release_channel(hdev->dma_lch);
1027res_err:
1028	tasklet_kill(&hdev->done_task);
1029	tasklet_kill(&hdev->dma_task);
1030
1031	return err;
1032}
1033
1034static void img_hash_remove(struct platform_device *pdev)
1035{
1036	struct img_hash_dev *hdev;
1037
1038	hdev = platform_get_drvdata(pdev);
1039	spin_lock(&img_hash.lock);
1040	list_del(&hdev->list);
1041	spin_unlock(&img_hash.lock);
1042
1043	img_unregister_algs(hdev);
1044
1045	tasklet_kill(&hdev->done_task);
1046	tasklet_kill(&hdev->dma_task);
1047
1048	dma_release_channel(hdev->dma_lch);
1049}
1050
1051#ifdef CONFIG_PM_SLEEP
1052static int img_hash_suspend(struct device *dev)
1053{
1054	struct img_hash_dev *hdev = dev_get_drvdata(dev);
1055
1056	clk_disable_unprepare(hdev->hash_clk);
1057	clk_disable_unprepare(hdev->sys_clk);
1058
1059	return 0;
1060}
1061
1062static int img_hash_resume(struct device *dev)
1063{
1064	struct img_hash_dev *hdev = dev_get_drvdata(dev);
1065	int ret;
1066
1067	ret = clk_prepare_enable(hdev->hash_clk);
1068	if (ret)
1069		return ret;
1070
1071	ret = clk_prepare_enable(hdev->sys_clk);
1072	if (ret) {
1073		clk_disable_unprepare(hdev->hash_clk);
1074		return ret;
1075	}
1076
1077	return 0;
1078}
1079#endif /* CONFIG_PM_SLEEP */
1080
1081static const struct dev_pm_ops img_hash_pm_ops = {
1082	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
1083};
1084
1085static struct platform_driver img_hash_driver = {
1086	.probe		= img_hash_probe,
1087	.remove		= img_hash_remove,
1088	.driver		= {
1089		.name	= "img-hash-accelerator",
1090		.pm	= &img_hash_pm_ops,
1091		.of_match_table	= img_hash_match,
1092	}
1093};
1094module_platform_driver(img_hash_driver);
1095
1096MODULE_LICENSE("GPL v2");
1097MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
1098MODULE_AUTHOR("Will Thomas.");
1099MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");
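
Below is a minimal, illustrative sketch (not part of the driver above) of how a kernel consumer could drive one of the ahash algorithms this driver registers, for example "img-sha256", through the standard crypto ahash interface. The helper name example_img_sha256() and the assumption that data points to a kmalloc'd (linearly mapped) buffer are illustrative; the crypto_*, ahash_request_* and crypto_wait_req() calls are the ordinary kernel crypto API.

#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical consumer: hash 'len' bytes from a kmalloc'd buffer. */
static int example_img_sha256(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Select this driver's implementation by its cra_driver_name. */
	tfm = crypto_alloc_ahash("img-sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* 'data' must be linearly mapped (e.g. kmalloc'd), not stack/vmalloc. */
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* The async request completes via img_hash_finish_req(); wait for it. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}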
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2014 Imagination Technologies
   4 * Authors:  Will Thomas, James Hartley
   5 *
   6 *	Interface structure taken from omap-sham driver
   7 */
   8
   9#include <linux/clk.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/dmaengine.h>
  12#include <linux/interrupt.h>
  13#include <linux/io.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/of_device.h>
  17#include <linux/platform_device.h>
  18#include <linux/scatterlist.h>
  19
  20#include <crypto/internal/hash.h>
  21#include <crypto/md5.h>
  22#include <crypto/sha1.h>
  23#include <crypto/sha2.h>
  24
  25#define CR_RESET			0
  26#define CR_RESET_SET			1
  27#define CR_RESET_UNSET			0
  28
  29#define CR_MESSAGE_LENGTH_H		0x4
  30#define CR_MESSAGE_LENGTH_L		0x8
  31
  32#define CR_CONTROL			0xc
  33#define CR_CONTROL_BYTE_ORDER_3210	0
  34#define CR_CONTROL_BYTE_ORDER_0123	1
  35#define CR_CONTROL_BYTE_ORDER_2310	2
  36#define CR_CONTROL_BYTE_ORDER_1032	3
  37#define CR_CONTROL_BYTE_ORDER_SHIFT	8
  38#define CR_CONTROL_ALGO_MD5	0
  39#define CR_CONTROL_ALGO_SHA1	1
  40#define CR_CONTROL_ALGO_SHA224	2
  41#define CR_CONTROL_ALGO_SHA256	3
  42
  43#define CR_INTSTAT			0x10
  44#define CR_INTENAB			0x14
  45#define CR_INTCLEAR			0x18
  46#define CR_INT_RESULTS_AVAILABLE	BIT(0)
  47#define CR_INT_NEW_RESULTS_SET		BIT(1)
  48#define CR_INT_RESULT_READ_ERR		BIT(2)
  49#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
  50#define CR_INT_STATUS			BIT(8)
  51
  52#define CR_RESULT_QUEUE		0x1c
  53#define CR_RSD0				0x40
  54#define CR_CORE_REV			0x50
  55#define CR_CORE_DES1		0x60
  56#define CR_CORE_DES2		0x70
  57
  58#define DRIVER_FLAGS_BUSY		BIT(0)
  59#define DRIVER_FLAGS_FINAL		BIT(1)
  60#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
  61#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
  62#define DRIVER_FLAGS_INIT		BIT(4)
  63#define DRIVER_FLAGS_CPU		BIT(5)
  64#define DRIVER_FLAGS_DMA_READY		BIT(6)
  65#define DRIVER_FLAGS_ERROR		BIT(7)
  66#define DRIVER_FLAGS_SG			BIT(8)
  67#define DRIVER_FLAGS_SHA1		BIT(18)
  68#define DRIVER_FLAGS_SHA224		BIT(19)
  69#define DRIVER_FLAGS_SHA256		BIT(20)
  70#define DRIVER_FLAGS_MD5		BIT(21)
  71
  72#define IMG_HASH_QUEUE_LENGTH		20
  73#define IMG_HASH_DMA_BURST		4
  74#define IMG_HASH_DMA_THRESHOLD		64
  75
  76#ifdef __LITTLE_ENDIAN
  77#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
  78#else
  79#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
  80#endif
  81
  82struct img_hash_dev;
  83
  84struct img_hash_request_ctx {
  85	struct img_hash_dev	*hdev;
  86	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
  87	unsigned long		flags;
  88	size_t			digsize;
  89
  90	dma_addr_t		dma_addr;
  91	size_t			dma_ct;
  92
  93	/* sg root */
  94	struct scatterlist	*sgfirst;
  95	/* walk state */
  96	struct scatterlist	*sg;
  97	size_t			nents;
  98	size_t			offset;
  99	unsigned int		total;
 100	size_t			sent;
 101
 102	unsigned long		op;
 103
 104	size_t			bufcnt;
 105	struct ahash_request	fallback_req;
 106
 107	/* Zero length buffer must remain last member of struct */
 108	u8 buffer[] __aligned(sizeof(u32));
 109};
 110
 111struct img_hash_ctx {
 112	struct img_hash_dev	*hdev;
 113	unsigned long		flags;
 114	struct crypto_ahash	*fallback;
 115};
 116
 117struct img_hash_dev {
 118	struct list_head	list;
 119	struct device		*dev;
 120	struct clk		*hash_clk;
 121	struct clk		*sys_clk;
 122	void __iomem		*io_base;
 123
 124	phys_addr_t		bus_addr;
 125	void __iomem		*cpu_addr;
 126
 127	spinlock_t		lock;
 128	int			err;
 129	struct tasklet_struct	done_task;
 130	struct tasklet_struct	dma_task;
 131
 132	unsigned long		flags;
 133	struct crypto_queue	queue;
 134	struct ahash_request	*req;
 135
 136	struct dma_chan		*dma_lch;
 137};
 138
 139struct img_hash_drv {
 140	struct list_head dev_list;
 141	spinlock_t lock;
 142};
 143
 144static struct img_hash_drv img_hash = {
 145	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
 146	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
 147};
 148
 149static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
 150{
 151	return readl_relaxed(hdev->io_base + offset);
 152}
 153
 154static inline void img_hash_write(struct img_hash_dev *hdev,
 155				  u32 offset, u32 value)
 156{
 157	writel_relaxed(value, hdev->io_base + offset);
 158}
 159
 160static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
 161{
 162	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
 163}
 164
 165static void img_hash_start(struct img_hash_dev *hdev, bool dma)
 166{
 167	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 168	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;
 169
 170	if (ctx->flags & DRIVER_FLAGS_MD5)
 171		cr |= CR_CONTROL_ALGO_MD5;
 172	else if (ctx->flags & DRIVER_FLAGS_SHA1)
 173		cr |= CR_CONTROL_ALGO_SHA1;
 174	else if (ctx->flags & DRIVER_FLAGS_SHA224)
 175		cr |= CR_CONTROL_ALGO_SHA224;
 176	else if (ctx->flags & DRIVER_FLAGS_SHA256)
 177		cr |= CR_CONTROL_ALGO_SHA256;
 178	dev_dbg(hdev->dev, "Starting hash process\n");
 179	img_hash_write(hdev, CR_CONTROL, cr);
 180
 181	/*
 182	 * The hardware block requires two cycles between writing the control
 183	 * register and writing the first word of data in non DMA mode, to
 184	 * ensure the first data write is not grouped in burst with the control
 185	 * register write a read is issued to 'flush' the bus.
 186	 */
 187	if (!dma)
 188		img_hash_read(hdev, CR_CONTROL);
 189}
 190
 191static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
 192			     size_t length, int final)
 193{
 194	u32 count, len32;
 195	const u32 *buffer = (const u32 *)buf;
 196
 197	dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);
 198
 199	if (final)
 200		hdev->flags |= DRIVER_FLAGS_FINAL;
 201
 202	len32 = DIV_ROUND_UP(length, sizeof(u32));
 203
 204	for (count = 0; count < len32; count++)
 205		writel_relaxed(buffer[count], hdev->cpu_addr);
 206
 207	return -EINPROGRESS;
 208}
 209
 210static void img_hash_dma_callback(void *data)
 211{
 212	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
 213	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 214
 215	if (ctx->bufcnt) {
 216		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
 217		ctx->bufcnt = 0;
 218	}
 219	if (ctx->sg)
 220		tasklet_schedule(&hdev->dma_task);
 221}
 222
 223static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
 224{
 225	struct dma_async_tx_descriptor *desc;
 226	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 227
 228	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 229	if (ctx->dma_ct == 0) {
 230		dev_err(hdev->dev, "Invalid DMA sg\n");
 231		hdev->err = -EINVAL;
 232		return -EINVAL;
 233	}
 234
 235	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
 236				       sg,
 237				       ctx->dma_ct,
 238				       DMA_MEM_TO_DEV,
 239				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 240	if (!desc) {
 241		dev_err(hdev->dev, "Null DMA descriptor\n");
 242		hdev->err = -EINVAL;
 243		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 244		return -EINVAL;
 245	}
 246	desc->callback = img_hash_dma_callback;
 247	desc->callback_param = hdev;
 248	dmaengine_submit(desc);
 249	dma_async_issue_pending(hdev->dma_lch);
 250
 251	return 0;
 252}
 253
 254static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
 255{
 256	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 257
 258	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
 259					ctx->buffer, hdev->req->nbytes);
 260
 261	ctx->total = hdev->req->nbytes;
 262	ctx->bufcnt = 0;
 263
 264	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);
 265
 266	img_hash_start(hdev, false);
 267
 268	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
 269}
 270
 271static int img_hash_finish(struct ahash_request *req)
 272{
 273	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 274
 275	if (!req->result)
 276		return -EINVAL;
 277
 278	memcpy(req->result, ctx->digest, ctx->digsize);
 279
 280	return 0;
 281}
 282
 283static void img_hash_copy_hash(struct ahash_request *req)
 284{
 285	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 286	u32 *hash = (u32 *)ctx->digest;
 287	int i;
 288
 289	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
 290		hash[i] = img_hash_read_result_queue(ctx->hdev);
 291}
 292
 293static void img_hash_finish_req(struct ahash_request *req, int err)
 294{
 295	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 296	struct img_hash_dev *hdev =  ctx->hdev;
 297
 298	if (!err) {
 299		img_hash_copy_hash(req);
 300		if (DRIVER_FLAGS_FINAL & hdev->flags)
 301			err = img_hash_finish(req);
 302	} else {
 303		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
 304		ctx->flags |= DRIVER_FLAGS_ERROR;
 305	}
 306
 307	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
 308		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
 309
 310	if (req->base.complete)
 311		req->base.complete(&req->base, err);
 312}
 313
 314static int img_hash_write_via_dma(struct img_hash_dev *hdev)
 315{
 316	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 317
 318	img_hash_start(hdev, true);
 319
 320	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);
 321
 322	if (!ctx->total)
 323		hdev->flags |= DRIVER_FLAGS_FINAL;
 324
 325	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;
 326
 327	tasklet_schedule(&hdev->dma_task);
 328
 329	return -EINPROGRESS;
 330}
 331
 332static int img_hash_dma_init(struct img_hash_dev *hdev)
 333{
 334	struct dma_slave_config dma_conf;
 335	int err;
 336
 337	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
 338	if (IS_ERR(hdev->dma_lch)) {
 339		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
 340		return PTR_ERR(hdev->dma_lch);
 341	}
 342	dma_conf.direction = DMA_MEM_TO_DEV;
 343	dma_conf.dst_addr = hdev->bus_addr;
 344	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 345	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
 346	dma_conf.device_fc = false;
 347
 348	err = dmaengine_slave_config(hdev->dma_lch,  &dma_conf);
 349	if (err) {
 350		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
 351		dma_release_channel(hdev->dma_lch);
 352		return err;
 353	}
 354
 355	return 0;
 356}
 357
 358static void img_hash_dma_task(unsigned long d)
 359{
 360	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
 361	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 362	u8 *addr;
 363	size_t nbytes, bleft, wsend, len, tbc;
 364	struct scatterlist tsg;
 365
 366	if (!hdev->req || !ctx->sg)
 
 
 
 
 367		return;
 368
 369	addr = sg_virt(ctx->sg);
 370	nbytes = ctx->sg->length - ctx->offset;
 371
 372	/*
 373	 * The hash accelerator does not support a data valid mask. This means
 374	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
 375	 * padding bytes in the last word written by that dma would erroneously
 376	 * be included in the hash. To avoid this we round down the transfer,
 377	 * and add the excess to the start of the next dma. It does not matter
 378	 * that the final dma may not be a multiple of 4 bytes as the hashing
 379	 * block is programmed to accept the correct number of bytes.
 380	 */
 381
 382	bleft = nbytes % 4;
 383	wsend = (nbytes / 4);
 384
 385	if (wsend) {
 386		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
 387		if (img_hash_xmit_dma(hdev, &tsg)) {
 388			dev_err(hdev->dev, "DMA failed, falling back to CPU");
 389			ctx->flags |= DRIVER_FLAGS_CPU;
 390			hdev->err = 0;
 391			img_hash_xmit_cpu(hdev, addr + ctx->offset,
 392					  wsend * 4, 0);
 393			ctx->sent += wsend * 4;
 394			wsend = 0;
 395		} else {
 396			ctx->sent += wsend * 4;
 397		}
 398	}
 399
 400	if (bleft) {
 401		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
 402						 ctx->buffer, bleft, ctx->sent);
 403		tbc = 0;
 404		ctx->sg = sg_next(ctx->sg);
 405		while (ctx->sg && (ctx->bufcnt < 4)) {
 406			len = ctx->sg->length;
 407			if (likely(len > (4 - ctx->bufcnt)))
 408				len = 4 - ctx->bufcnt;
 409			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
 410						 ctx->buffer + ctx->bufcnt, len,
 411					ctx->sent + ctx->bufcnt);
 412			ctx->bufcnt += tbc;
 413			if (tbc >= ctx->sg->length) {
 414				ctx->sg = sg_next(ctx->sg);
 415				tbc = 0;
 416			}
 417		}
 418
 419		ctx->sent += ctx->bufcnt;
 420		ctx->offset = tbc;
 421
 422		if (!wsend)
 423			img_hash_dma_callback(hdev);
 424	} else {
 425		ctx->offset = 0;
 426		ctx->sg = sg_next(ctx->sg);
 427	}
 428}
 429
 430static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
 431{
 432	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 433
 434	if (ctx->flags & DRIVER_FLAGS_SG)
 435		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
 436
 437	return 0;
 438}
 439
 440static int img_hash_process_data(struct img_hash_dev *hdev)
 441{
 442	struct ahash_request *req = hdev->req;
 443	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 444	int err = 0;
 445
 446	ctx->bufcnt = 0;
 447
 448	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
 449		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
 450			req->nbytes);
 451		err = img_hash_write_via_dma(hdev);
 452	} else {
 453		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
 454			req->nbytes);
 455		err = img_hash_write_via_cpu(hdev);
 456	}
 457	return err;
 458}
 459
 460static int img_hash_hw_init(struct img_hash_dev *hdev)
 461{
 462	unsigned long long nbits;
 463	u32 u, l;
 464
 465	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
 466	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
 467	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);
 468
 469	nbits = (u64)hdev->req->nbytes << 3;
 470	u = nbits >> 32;
 471	l = nbits;
 472	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
 473	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);
 474
 475	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
 476		hdev->flags |= DRIVER_FLAGS_INIT;
 477		hdev->err = 0;
 478	}
 479	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
 480	return 0;
 481}
 482
 483static int img_hash_init(struct ahash_request *req)
 484{
 485	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 486	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 487	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 488
 489	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 490	rctx->fallback_req.base.flags =	req->base.flags
 491		& CRYPTO_TFM_REQ_MAY_SLEEP;
 492
 493	return crypto_ahash_init(&rctx->fallback_req);
 494}
 495
 496static int img_hash_handle_queue(struct img_hash_dev *hdev,
 497				 struct ahash_request *req)
 498{
 499	struct crypto_async_request *async_req, *backlog;
 500	struct img_hash_request_ctx *ctx;
 501	unsigned long flags;
 502	int err = 0, res = 0;
 503
 504	spin_lock_irqsave(&hdev->lock, flags);
 505
 506	if (req)
 507		res = ahash_enqueue_request(&hdev->queue, req);
 508
 509	if (DRIVER_FLAGS_BUSY & hdev->flags) {
 510		spin_unlock_irqrestore(&hdev->lock, flags);
 511		return res;
 512	}
 513
 514	backlog = crypto_get_backlog(&hdev->queue);
 515	async_req = crypto_dequeue_request(&hdev->queue);
 516	if (async_req)
 517		hdev->flags |= DRIVER_FLAGS_BUSY;
 518
 519	spin_unlock_irqrestore(&hdev->lock, flags);
 520
 521	if (!async_req)
 522		return res;
 523
 524	if (backlog)
 525		backlog->complete(backlog, -EINPROGRESS);
 526
 527	req = ahash_request_cast(async_req);
 528	hdev->req = req;
 529
 530	ctx = ahash_request_ctx(req);
 531
 532	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
 533		 ctx->op, req->nbytes);
 534
 535	err = img_hash_hw_init(hdev);
 536
 537	if (!err)
 538		err = img_hash_process_data(hdev);
 539
 540	if (err != -EINPROGRESS) {
 541		/* done_task will not finish so do it here */
 542		img_hash_finish_req(req, err);
 543	}
 544	return res;
 545}
 546
 547static int img_hash_update(struct ahash_request *req)
 548{
 549	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 550	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 551	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 552
 553	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 554	rctx->fallback_req.base.flags = req->base.flags
 555		& CRYPTO_TFM_REQ_MAY_SLEEP;
 556	rctx->fallback_req.nbytes = req->nbytes;
 557	rctx->fallback_req.src = req->src;
 558
 559	return crypto_ahash_update(&rctx->fallback_req);
 560}
 561
 562static int img_hash_final(struct ahash_request *req)
 563{
 564	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 565	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 566	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 567
 568	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 569	rctx->fallback_req.base.flags = req->base.flags
 570		& CRYPTO_TFM_REQ_MAY_SLEEP;
 571	rctx->fallback_req.result = req->result;
 572
 573	return crypto_ahash_final(&rctx->fallback_req);
 574}
 575
 576static int img_hash_finup(struct ahash_request *req)
 577{
 578	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 579	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 580	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 581
 582	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 583	rctx->fallback_req.base.flags = req->base.flags
 584		& CRYPTO_TFM_REQ_MAY_SLEEP;
 585	rctx->fallback_req.nbytes = req->nbytes;
 586	rctx->fallback_req.src = req->src;
 587	rctx->fallback_req.result = req->result;
 588
 589	return crypto_ahash_finup(&rctx->fallback_req);
 590}
 591
 592static int img_hash_import(struct ahash_request *req, const void *in)
 593{
 594	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 595	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 596	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 597
 598	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 599	rctx->fallback_req.base.flags = req->base.flags
 600		& CRYPTO_TFM_REQ_MAY_SLEEP;
 601
 602	return crypto_ahash_import(&rctx->fallback_req, in);
 603}
 604
 605static int img_hash_export(struct ahash_request *req, void *out)
 606{
 607	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 608	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 609	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 610
 611	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 612	rctx->fallback_req.base.flags = req->base.flags
 613		& CRYPTO_TFM_REQ_MAY_SLEEP;
 614
 615	return crypto_ahash_export(&rctx->fallback_req, out);
 616}
 617
 618static int img_hash_digest(struct ahash_request *req)
 619{
 620	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 621	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
 622	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 623	struct img_hash_dev *hdev = NULL;
 624	struct img_hash_dev *tmp;
 625	int err;
 626
 627	spin_lock(&img_hash.lock);
 628	if (!tctx->hdev) {
 629		list_for_each_entry(tmp, &img_hash.dev_list, list) {
 630			hdev = tmp;
 631			break;
 632		}
 633		tctx->hdev = hdev;
 634
 635	} else {
 636		hdev = tctx->hdev;
 637	}
 638
 639	spin_unlock(&img_hash.lock);
 640	ctx->hdev = hdev;
 641	ctx->flags = 0;
 642	ctx->digsize = crypto_ahash_digestsize(tfm);
 643
 644	switch (ctx->digsize) {
 645	case SHA1_DIGEST_SIZE:
 646		ctx->flags |= DRIVER_FLAGS_SHA1;
 647		break;
 648	case SHA256_DIGEST_SIZE:
 649		ctx->flags |= DRIVER_FLAGS_SHA256;
 650		break;
 651	case SHA224_DIGEST_SIZE:
 652		ctx->flags |= DRIVER_FLAGS_SHA224;
 653		break;
 654	case MD5_DIGEST_SIZE:
 655		ctx->flags |= DRIVER_FLAGS_MD5;
 656		break;
 657	default:
 658		return -EINVAL;
 659	}
 660
 661	ctx->bufcnt = 0;
 662	ctx->offset = 0;
 663	ctx->sent = 0;
 664	ctx->total = req->nbytes;
 665	ctx->sg = req->src;
 666	ctx->sgfirst = req->src;
 667	ctx->nents = sg_nents(ctx->sg);
 668
 669	err = img_hash_handle_queue(tctx->hdev, req);
 670
 671	return err;
 672}
 673
 674static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
 675{
 676	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 677	int err = -ENOMEM;
 678
 679	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
 680					   CRYPTO_ALG_NEED_FALLBACK);
 681	if (IS_ERR(ctx->fallback)) {
 682		pr_err("img_hash: Could not load fallback driver.\n");
 683		err = PTR_ERR(ctx->fallback);
 684		goto err;
 685	}
 686	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 687				 sizeof(struct img_hash_request_ctx) +
 688				 crypto_ahash_reqsize(ctx->fallback) +
 689				 IMG_HASH_DMA_THRESHOLD);
 690
 691	return 0;
 692
 693err:
 694	return err;
 695}
 696
 697static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
 698{
 699	return img_hash_cra_init(tfm, "md5-generic");
 700}
 701
 702static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
 703{
 704	return img_hash_cra_init(tfm, "sha1-generic");
 705}
 706
 707static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
 708{
 709	return img_hash_cra_init(tfm, "sha224-generic");
 710}
 711
 712static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
 713{
 714	return img_hash_cra_init(tfm, "sha256-generic");
 715}
 716
 717static void img_hash_cra_exit(struct crypto_tfm *tfm)
 718{
 719	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
 720
 721	crypto_free_ahash(tctx->fallback);
 722}
 723
 724static irqreturn_t img_irq_handler(int irq, void *dev_id)
 725{
 726	struct img_hash_dev *hdev = dev_id;
 727	u32 reg;
 728
 729	reg = img_hash_read(hdev, CR_INTSTAT);
 730	img_hash_write(hdev, CR_INTCLEAR, reg);
 731
 732	if (reg & CR_INT_NEW_RESULTS_SET) {
 733		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
 734		if (DRIVER_FLAGS_BUSY & hdev->flags) {
 735			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
 736			if (!(DRIVER_FLAGS_CPU & hdev->flags))
 737				hdev->flags |= DRIVER_FLAGS_DMA_READY;
 738			tasklet_schedule(&hdev->done_task);
 739		} else {
 740			dev_warn(hdev->dev,
 741				 "HASH interrupt when no active requests.\n");
 742		}
 743	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
 744		dev_warn(hdev->dev,
 745			 "IRQ triggered before the hash had completed\n");
 746	} else if (reg & CR_INT_RESULT_READ_ERR) {
 747		dev_warn(hdev->dev,
 748			 "Attempt to read from an empty result queue\n");
 749	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
 750		dev_warn(hdev->dev,
 751			 "Data written before the hardware was configured\n");
 752	}
 753	return IRQ_HANDLED;
 754}
 755
 756static struct ahash_alg img_algs[] = {
 757	{
 758		.init = img_hash_init,
 759		.update = img_hash_update,
 760		.final = img_hash_final,
 761		.finup = img_hash_finup,
 762		.export = img_hash_export,
 763		.import = img_hash_import,
 764		.digest = img_hash_digest,
 765		.halg = {
 766			.digestsize = MD5_DIGEST_SIZE,
 767			.statesize = sizeof(struct md5_state),
 768			.base = {
 769				.cra_name = "md5",
 770				.cra_driver_name = "img-md5",
 771				.cra_priority = 300,
 772				.cra_flags =
 773				CRYPTO_ALG_ASYNC |
 774				CRYPTO_ALG_NEED_FALLBACK,
 775				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 776				.cra_ctxsize = sizeof(struct img_hash_ctx),
 777				.cra_init = img_hash_cra_md5_init,
 778				.cra_exit = img_hash_cra_exit,
 779				.cra_module = THIS_MODULE,
 780			}
 781		}
 782	},
 783	{
 784		.init = img_hash_init,
 785		.update = img_hash_update,
 786		.final = img_hash_final,
 787		.finup = img_hash_finup,
 788		.export = img_hash_export,
 789		.import = img_hash_import,
 790		.digest = img_hash_digest,
 791		.halg = {
 792			.digestsize = SHA1_DIGEST_SIZE,
 793			.statesize = sizeof(struct sha1_state),
 794			.base = {
 795				.cra_name = "sha1",
 796				.cra_driver_name = "img-sha1",
 797				.cra_priority = 300,
 798				.cra_flags =
 799				CRYPTO_ALG_ASYNC |
 800				CRYPTO_ALG_NEED_FALLBACK,
 801				.cra_blocksize = SHA1_BLOCK_SIZE,
 802				.cra_ctxsize = sizeof(struct img_hash_ctx),
 803				.cra_init = img_hash_cra_sha1_init,
 804				.cra_exit = img_hash_cra_exit,
 805				.cra_module = THIS_MODULE,
 806			}
 807		}
 808	},
 809	{
 810		.init = img_hash_init,
 811		.update = img_hash_update,
 812		.final = img_hash_final,
 813		.finup = img_hash_finup,
 814		.export = img_hash_export,
 815		.import = img_hash_import,
 816		.digest = img_hash_digest,
 817		.halg = {
 818			.digestsize = SHA224_DIGEST_SIZE,
 819			.statesize = sizeof(struct sha256_state),
 820			.base = {
 821				.cra_name = "sha224",
 822				.cra_driver_name = "img-sha224",
 823				.cra_priority = 300,
 824				.cra_flags =
 825				CRYPTO_ALG_ASYNC |
 826				CRYPTO_ALG_NEED_FALLBACK,
 827				.cra_blocksize = SHA224_BLOCK_SIZE,
 828				.cra_ctxsize = sizeof(struct img_hash_ctx),
 829				.cra_init = img_hash_cra_sha224_init,
 830				.cra_exit = img_hash_cra_exit,
 831				.cra_module = THIS_MODULE,
 832			}
 833		}
 834	},
 835	{
 836		.init = img_hash_init,
 837		.update = img_hash_update,
 838		.final = img_hash_final,
 839		.finup = img_hash_finup,
 840		.export = img_hash_export,
 841		.import = img_hash_import,
 842		.digest = img_hash_digest,
 843		.halg = {
 844			.digestsize = SHA256_DIGEST_SIZE,
 845			.statesize = sizeof(struct sha256_state),
 846			.base = {
 847				.cra_name = "sha256",
 848				.cra_driver_name = "img-sha256",
 849				.cra_priority = 300,
 850				.cra_flags =
 851				CRYPTO_ALG_ASYNC |
 852				CRYPTO_ALG_NEED_FALLBACK,
 853				.cra_blocksize = SHA256_BLOCK_SIZE,
 854				.cra_ctxsize = sizeof(struct img_hash_ctx),
 855				.cra_init = img_hash_cra_sha256_init,
 856				.cra_exit = img_hash_cra_exit,
 857				.cra_module = THIS_MODULE,
 858			}
 859		}
 860	}
 861};
 862
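/* Register all entries of img_algs[], unwinding the ones already registered on failure. */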
 863static int img_register_algs(struct img_hash_dev *hdev)
 864{
 865	int i, err;
 866
 867	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
 868		err = crypto_register_ahash(&img_algs[i]);
 869		if (err)
 870			goto err_reg;
 871	}
 872	return 0;
 873
 874err_reg:
 875	for (; i--; )
 876		crypto_unregister_ahash(&img_algs[i]);
 877
 878	return err;
 879}
 880
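/* Unregister everything img_register_algs() registered. */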
 881static int img_unregister_algs(struct img_hash_dev *hdev)
 882{
 883	int i;
 884
 885	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
 886		crypto_unregister_ahash(&img_algs[i]);
 887	return 0;
 888}
 889
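/*
 * Bottom half scheduled from the interrupt handler.  It either finishes the
 * current request (reporting any error noted in hdev->err) or, when the
 * device has gone idle, kicks the queue to start the next request.
 */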
 890static void img_hash_done_task(unsigned long data)
 891{
 892	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
 893	int err = 0;
 894
 895	if (hdev->err == -EINVAL) {
 896		err = hdev->err;
 897		goto finish;
 898	}
 899
 900	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
 901		img_hash_handle_queue(hdev, NULL);
 902		return;
 903	}
 904
 905	if (DRIVER_FLAGS_CPU & hdev->flags) {
 906		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
 907			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
 908			goto finish;
 909		}
 910	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
 911		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
 912			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
 913			img_hash_write_via_dma_stop(hdev);
 914			if (hdev->err) {
 915				err = hdev->err;
 916				goto finish;
 917			}
 918		}
 919		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
 920			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
 921					DRIVER_FLAGS_OUTPUT_READY);
 922			goto finish;
 923		}
 924	}
 925	return;
 926
 927finish:
 928	img_hash_finish_req(hdev->req, err);
 929}
 930
 931static const struct of_device_id img_hash_match[] = {
 932	{ .compatible = "img,hash-accelerator" },
 933	{}
 934};
 935MODULE_DEVICE_TABLE(of, img_hash_match);
 936
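/*
 * Probe: the accelerator needs two MMIO regions (the register bank and the
 * data write port), one interrupt, "hash" and "sys" clocks, and a DMA
 * channel for bulk message data.
 */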
 937static int img_hash_probe(struct platform_device *pdev)
 938{
 939	struct img_hash_dev *hdev;
 940	struct device *dev = &pdev->dev;
 941	struct resource *hash_res;
 942	int	irq;
 943	int err;
 944
 945	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
 946	if (!hdev)
 947		return -ENOMEM;
 948
 949	spin_lock_init(&hdev->lock);
 950
 951	hdev->dev = dev;
 952
 953	platform_set_drvdata(pdev, hdev);
 954
 955	INIT_LIST_HEAD(&hdev->list);
 956
 957	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
 958	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);
 959
 960	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);
 961
 962	/* Register bank */
 963	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
 964	if (IS_ERR(hdev->io_base)) {
 965		err = PTR_ERR(hdev->io_base);
 966		goto res_err;
 967	}
 968
 969	/* Write port: CPU-mapped for PIO writes; bus address kept for DMA */
 970	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 971	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
 972	if (IS_ERR(hdev->cpu_addr)) {
 973		err = PTR_ERR(hdev->cpu_addr);
 974		goto res_err;
 975	}
 976	hdev->bus_addr = hash_res->start;
 977
 978	irq = platform_get_irq(pdev, 0);
 979	if (irq < 0) {
 980		err = irq;
 981		goto res_err;
 982	}
 983
 984	err = devm_request_irq(dev, irq, img_irq_handler, 0,
 985			       dev_name(dev), hdev);
 986	if (err) {
 987		dev_err(dev, "unable to request irq\n");
 988		goto res_err;
 989	}
 990	dev_dbg(dev, "using IRQ channel %d\n", irq);
 991
 992	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
 993	if (IS_ERR(hdev->hash_clk)) {
 994		dev_err(dev, "failed to get hash clock\n");
 995		err = PTR_ERR(hdev->hash_clk);
 996		goto res_err;
 997	}
 998
 999	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
1000	if (IS_ERR(hdev->sys_clk)) {
1001		dev_err(dev, "failed to get sys clock\n");
1002		err = PTR_ERR(hdev->sys_clk);
1003		goto res_err;
1004	}
1005
1006	err = clk_prepare_enable(hdev->hash_clk);
1007	if (err)
1008		goto res_err;
1009
1010	err = clk_prepare_enable(hdev->sys_clk);
1011	if (err)
1012		goto clk_err;
1013
1014	err = img_hash_dma_init(hdev);
1015	if (err)
1016		goto dma_err;
1017
1018	dev_dbg(dev, "using %s for DMA transfers\n",
1019		dma_chan_name(hdev->dma_lch));
1020
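	/*
	 * Make the device discoverable on the global list before the
	 * algorithms are registered, so a transform set up immediately
	 * after registration can already find a device to run on.
	 */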
1021	spin_lock(&img_hash.lock);
1022	list_add_tail(&hdev->list, &img_hash.dev_list);
1023	spin_unlock(&img_hash.lock);
1024
1025	err = img_register_algs(hdev);
1026	if (err)
1027		goto err_algs;
1028	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 hardware accelerator initialized\n");
1029
1030	return 0;
1031
1032err_algs:
1033	spin_lock(&img_hash.lock);
1034	list_del(&hdev->list);
1035	spin_unlock(&img_hash.lock);
1036	dma_release_channel(hdev->dma_lch);
1037dma_err:
1038	clk_disable_unprepare(hdev->sys_clk);
1039clk_err:
1040	clk_disable_unprepare(hdev->hash_clk);
1041res_err:
1042	tasklet_kill(&hdev->done_task);
1043	tasklet_kill(&hdev->dma_task);
1044
1045	return err;
1046}
1047
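/*
 * Undo probe: take the device off the global list, unregister the
 * algorithms, stop the tasklets and release DMA and clock resources.
 */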
1048static int img_hash_remove(struct platform_device *pdev)
1049{
1050	struct img_hash_dev *hdev;
1051
1052	hdev = platform_get_drvdata(pdev);
1053	spin_lock(&img_hash.lock);
1054	list_del(&hdev->list);
1055	spin_unlock(&img_hash.lock);
1056
1057	img_unregister_algs(hdev);
1058
1059	tasklet_kill(&hdev->done_task);
1060	tasklet_kill(&hdev->dma_task);
1061
1062	dma_release_channel(hdev->dma_lch);
1063
1064	clk_disable_unprepare(hdev->hash_clk);
1065	clk_disable_unprepare(hdev->sys_clk);
1066
1067	return 0;
1068}
1069
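/*
 * System sleep support: the driver keeps no hardware state across suspend,
 * so suspend/resume only gate and re-enable the two clocks.
 */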
1070#ifdef CONFIG_PM_SLEEP
1071static int img_hash_suspend(struct device *dev)
1072{
1073	struct img_hash_dev *hdev = dev_get_drvdata(dev);
1074
1075	clk_disable_unprepare(hdev->hash_clk);
1076	clk_disable_unprepare(hdev->sys_clk);
1077
1078	return 0;
1079}
1080
1081static int img_hash_resume(struct device *dev)
1082{
1083	struct img_hash_dev *hdev = dev_get_drvdata(dev);
1084	int ret;
1085
1086	ret = clk_prepare_enable(hdev->hash_clk);
1087	if (ret)
1088		return ret;
1089
1090	ret = clk_prepare_enable(hdev->sys_clk);
1091	if (ret) {
1092		clk_disable_unprepare(hdev->hash_clk);
1093		return ret;
1094	}
1095
1096	return 0;
1097}
1098#endif /* CONFIG_PM_SLEEP */
1099
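/* Only system-sleep callbacks are provided; there is no runtime PM handling. */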
1100static const struct dev_pm_ops img_hash_pm_ops = {
1101	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
1102};
1103
1104static struct platform_driver img_hash_driver = {
1105	.probe		= img_hash_probe,
1106	.remove		= img_hash_remove,
1107	.driver		= {
1108		.name	= "img-hash-accelerator",
1109		.pm	= &img_hash_pm_ops,
1110		.of_match_table	= of_match_ptr(img_hash_match),
1111	}
1112};
1113module_platform_driver(img_hash_driver);
1114
1115MODULE_LICENSE("GPL v2");
1116MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
1117MODULE_AUTHOR("Will Thomas.");
1118MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");