v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (c) 2014 Imagination Technologies
   4 * Authors:  Will Thomas, James Hartley
   5 *
   6 *	Interface structure taken from omap-sham driver
   7 */
   8
   9#include <linux/clk.h>
  10#include <linux/dma-mapping.h>
  11#include <linux/dmaengine.h>
  12#include <linux/interrupt.h>
  13#include <linux/io.h>
  14#include <linux/kernel.h>
  15#include <linux/module.h>
  16#include <linux/mod_devicetable.h>
  17#include <linux/platform_device.h>
  18#include <linux/scatterlist.h>
  19
  20#include <crypto/internal/hash.h>
  21#include <crypto/md5.h>
  22#include <crypto/sha1.h>
  23#include <crypto/sha2.h>
  24
  25#define CR_RESET			0
  26#define CR_RESET_SET			1
  27#define CR_RESET_UNSET			0
  28
  29#define CR_MESSAGE_LENGTH_H		0x4
  30#define CR_MESSAGE_LENGTH_L		0x8
  31
  32#define CR_CONTROL			0xc
  33#define CR_CONTROL_BYTE_ORDER_3210	0
  34#define CR_CONTROL_BYTE_ORDER_0123	1
  35#define CR_CONTROL_BYTE_ORDER_2310	2
  36#define CR_CONTROL_BYTE_ORDER_1032	3
  37#define CR_CONTROL_BYTE_ORDER_SHIFT	8
  38#define CR_CONTROL_ALGO_MD5	0
  39#define CR_CONTROL_ALGO_SHA1	1
  40#define CR_CONTROL_ALGO_SHA224	2
  41#define CR_CONTROL_ALGO_SHA256	3
  42
  43#define CR_INTSTAT			0x10
  44#define CR_INTENAB			0x14
  45#define CR_INTCLEAR			0x18
  46#define CR_INT_RESULTS_AVAILABLE	BIT(0)
  47#define CR_INT_NEW_RESULTS_SET		BIT(1)
  48#define CR_INT_RESULT_READ_ERR		BIT(2)
  49#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
  50#define CR_INT_STATUS			BIT(8)
  51
  52#define CR_RESULT_QUEUE		0x1c
  53#define CR_RSD0				0x40
  54#define CR_CORE_REV			0x50
  55#define CR_CORE_DES1		0x60
  56#define CR_CORE_DES2		0x70
  57
  58#define DRIVER_FLAGS_BUSY		BIT(0)
  59#define DRIVER_FLAGS_FINAL		BIT(1)
  60#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
  61#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
  62#define DRIVER_FLAGS_INIT		BIT(4)
  63#define DRIVER_FLAGS_CPU		BIT(5)
  64#define DRIVER_FLAGS_DMA_READY		BIT(6)
  65#define DRIVER_FLAGS_ERROR		BIT(7)
  66#define DRIVER_FLAGS_SG			BIT(8)
  67#define DRIVER_FLAGS_SHA1		BIT(18)
  68#define DRIVER_FLAGS_SHA224		BIT(19)
  69#define DRIVER_FLAGS_SHA256		BIT(20)
  70#define DRIVER_FLAGS_MD5		BIT(21)
  71
  72#define IMG_HASH_QUEUE_LENGTH		20
  73#define IMG_HASH_DMA_BURST		4
  74#define IMG_HASH_DMA_THRESHOLD		64
  75
  76#ifdef __LITTLE_ENDIAN
  77#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
  78#else
  79#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
  80#endif
  81
  82struct img_hash_dev;
  83
  84struct img_hash_request_ctx {
  85	struct img_hash_dev	*hdev;
  86	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
  87	unsigned long		flags;
  88	size_t			digsize;
  89
  90	dma_addr_t		dma_addr;
  91	size_t			dma_ct;
  92
  93	/* sg root */
  94	struct scatterlist	*sgfirst;
  95	/* walk state */
  96	struct scatterlist	*sg;
  97	size_t			nents;
  98	size_t			offset;
  99	unsigned int		total;
 100	size_t			sent;
 101
 102	unsigned long		op;
 103
 104	size_t			bufcnt;
 105	struct ahash_request	fallback_req;
 106
 107	/* Zero length buffer must remain last member of struct */
 108	u8 buffer[] __aligned(sizeof(u32));
 109};
 110
 111struct img_hash_ctx {
 112	struct img_hash_dev	*hdev;
 113	unsigned long		flags;
 114	struct crypto_ahash	*fallback;
 115};
 116
 117struct img_hash_dev {
 118	struct list_head	list;
 119	struct device		*dev;
 120	struct clk		*hash_clk;
 121	struct clk		*sys_clk;
 122	void __iomem		*io_base;
 123
 124	phys_addr_t		bus_addr;
 125	void __iomem		*cpu_addr;
 126
 127	spinlock_t		lock;
 128	int			err;
 129	struct tasklet_struct	done_task;
 130	struct tasklet_struct	dma_task;
 131
 132	unsigned long		flags;
 133	struct crypto_queue	queue;
 134	struct ahash_request	*req;
 135
 136	struct dma_chan		*dma_lch;
 137};
 138
 139struct img_hash_drv {
 140	struct list_head dev_list;
 141	spinlock_t lock;
 142};
 143
 144static struct img_hash_drv img_hash = {
 145	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
 146	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
 147};
 148
 149static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
 150{
 151	return readl_relaxed(hdev->io_base + offset);
 152}
 153
 154static inline void img_hash_write(struct img_hash_dev *hdev,
 155				  u32 offset, u32 value)
 156{
 157	writel_relaxed(value, hdev->io_base + offset);
 158}
 159
 160static inline __be32 img_hash_read_result_queue(struct img_hash_dev *hdev)
 161{
 162	return cpu_to_be32(img_hash_read(hdev, CR_RESULT_QUEUE));
 163}
 164
 165static void img_hash_start(struct img_hash_dev *hdev, bool dma)
 166{
 167	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 168	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;
 169
 170	if (ctx->flags & DRIVER_FLAGS_MD5)
 171		cr |= CR_CONTROL_ALGO_MD5;
 172	else if (ctx->flags & DRIVER_FLAGS_SHA1)
 173		cr |= CR_CONTROL_ALGO_SHA1;
 174	else if (ctx->flags & DRIVER_FLAGS_SHA224)
 175		cr |= CR_CONTROL_ALGO_SHA224;
 176	else if (ctx->flags & DRIVER_FLAGS_SHA256)
 177		cr |= CR_CONTROL_ALGO_SHA256;
 178	dev_dbg(hdev->dev, "Starting hash process\n");
 179	img_hash_write(hdev, CR_CONTROL, cr);
 180
 181	/*
 182	 * The hardware block requires two cycles between writing the control
 183	 * register and writing the first word of data in non DMA mode, to
 184	 * ensure the first data write is not grouped in burst with the control
 185	 * register write a read is issued to 'flush' the bus.
 186	 */
 187	if (!dma)
 188		img_hash_read(hdev, CR_CONTROL);
 189}
 190
 191static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
 192			     size_t length, int final)
 193{
 194	u32 count, len32;
 195	const u32 *buffer = (const u32 *)buf;
 196
 197	dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);
 198
 199	if (final)
 200		hdev->flags |= DRIVER_FLAGS_FINAL;
 201
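	/*
	 * Data is written in whole 32-bit words; any padding in the final
	 * word is harmless since the exact message length in bits has
	 * already been programmed into the hardware.
	 */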
 202	len32 = DIV_ROUND_UP(length, sizeof(u32));
 203
 204	for (count = 0; count < len32; count++)
 205		writel_relaxed(buffer[count], hdev->cpu_addr);
 206
 207	return -EINPROGRESS;
 208}
 209
 210static void img_hash_dma_callback(void *data)
 211{
 212	struct img_hash_dev *hdev = data;
 213	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 214
 215	if (ctx->bufcnt) {
 216		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
 217		ctx->bufcnt = 0;
 218	}
 219	if (ctx->sg)
 220		tasklet_schedule(&hdev->dma_task);
 221}
 222
 223static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
 224{
 225	struct dma_async_tx_descriptor *desc;
 226	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 227
 228	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 229	if (ctx->dma_ct == 0) {
 230		dev_err(hdev->dev, "Invalid DMA sg\n");
 231		hdev->err = -EINVAL;
 232		return -EINVAL;
 233	}
 234
 235	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
 236				       sg,
 237				       ctx->dma_ct,
 238				       DMA_MEM_TO_DEV,
 239				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 240	if (!desc) {
 241		dev_err(hdev->dev, "Null DMA descriptor\n");
 242		hdev->err = -EINVAL;
 243		dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE);
 244		return -EINVAL;
 245	}
 246	desc->callback = img_hash_dma_callback;
 247	desc->callback_param = hdev;
 248	dmaengine_submit(desc);
 249	dma_async_issue_pending(hdev->dma_lch);
 250
 251	return 0;
 252}
 253
 254static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
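/*
 * CPU (PIO) path for small requests: the payload is copied into the
 * request context buffer and then pushed to the write port word by word.
 */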
 255{
 256	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 257
 258	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
 259					ctx->buffer, hdev->req->nbytes);
 260
 261	ctx->total = hdev->req->nbytes;
 262	ctx->bufcnt = 0;
 263
 264	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);
 265
 266	img_hash_start(hdev, false);
 267
 268	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
 269}
 270
 271static int img_hash_finish(struct ahash_request *req)
 272{
 273	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 274
 275	if (!req->result)
 276		return -EINVAL;
 277
 278	memcpy(req->result, ctx->digest, ctx->digsize);
 279
 280	return 0;
 281}
 282
 283static void img_hash_copy_hash(struct ahash_request *req)
 284{
 285	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 286	__be32 *hash = (__be32 *)ctx->digest;
 287	int i;
 288
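	/*
	 * Drain the result queue: the first word popped is stored as the
	 * last word of the digest, so the buffer is filled backwards.
	 */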
 289	for (i = (ctx->digsize / sizeof(*hash)) - 1; i >= 0; i--)
 290		hash[i] = img_hash_read_result_queue(ctx->hdev);
 291}
 292
 293static void img_hash_finish_req(struct ahash_request *req, int err)
 294{
 295	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 296	struct img_hash_dev *hdev =  ctx->hdev;
 297
 298	if (!err) {
 299		img_hash_copy_hash(req);
 300		if (DRIVER_FLAGS_FINAL & hdev->flags)
 301			err = img_hash_finish(req);
 302	} else {
 303		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
 304		ctx->flags |= DRIVER_FLAGS_ERROR;
 305	}
 306
 307	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
 308		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
 309
 310	if (req->base.complete)
 311		ahash_request_complete(req, err);
 312}
 313
 314static int img_hash_write_via_dma(struct img_hash_dev *hdev)
 315{
 316	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 317
 318	img_hash_start(hdev, true);
 319
 320	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);
 321
 322	if (!ctx->total)
 323		hdev->flags |= DRIVER_FLAGS_FINAL;
 324
 325	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;
 326
 327	tasklet_schedule(&hdev->dma_task);
 328
 329	return -EINPROGRESS;
 330}
 331
 332static int img_hash_dma_init(struct img_hash_dev *hdev)
 333{
 334	struct dma_slave_config dma_conf;
 335	int err;
 336
 337	hdev->dma_lch = dma_request_chan(hdev->dev, "tx");
 338	if (IS_ERR(hdev->dma_lch)) {
 339		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
 340		return PTR_ERR(hdev->dma_lch);
 341	}
 342	dma_conf.direction = DMA_MEM_TO_DEV;
 343	dma_conf.dst_addr = hdev->bus_addr;
 344	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 345	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
 346	dma_conf.device_fc = false;
 347
 348	err = dmaengine_slave_config(hdev->dma_lch,  &dma_conf);
 349	if (err) {
 350		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
 351		dma_release_channel(hdev->dma_lch);
 352		return err;
 353	}
 354
 355	return 0;
 356}
 357
 358static void img_hash_dma_task(unsigned long d)
 359{
 360	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
 361	struct img_hash_request_ctx *ctx;
 362	u8 *addr;
 363	size_t nbytes, bleft, wsend, len, tbc;
 364	struct scatterlist tsg;
 365
 366	if (!hdev->req)
 367		return;
 368
 369	ctx = ahash_request_ctx(hdev->req);
 370	if (!ctx->sg)
 371		return;
 372
 373	addr = sg_virt(ctx->sg);
 374	nbytes = ctx->sg->length - ctx->offset;
 375
 376	/*
 377	 * The hash accelerator does not support a data valid mask. This means
 378	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
 379	 * padding bytes in the last word written by that dma would erroneously
 380	 * be included in the hash. To avoid this we round down the transfer,
 381	 * and add the excess to the start of the next dma. It does not matter
 382	 * that the final dma may not be a multiple of 4 bytes as the hashing
 383	 * block is programmed to accept the correct number of bytes.
 384	 */
 385
 386	bleft = nbytes % 4;
 387	wsend = (nbytes / 4);
 388
 389	if (wsend) {
 390		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
 391		if (img_hash_xmit_dma(hdev, &tsg)) {
 392			dev_err(hdev->dev, "DMA failed, falling back to CPU");
 393			ctx->flags |= DRIVER_FLAGS_CPU;
 394			hdev->err = 0;
 395			img_hash_xmit_cpu(hdev, addr + ctx->offset,
 396					  wsend * 4, 0);
 397			ctx->sent += wsend * 4;
 398			wsend = 0;
 399		} else {
 400			ctx->sent += wsend * 4;
 401		}
 402	}
 403
 404	if (bleft) {
 405		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
 406						 ctx->buffer, bleft, ctx->sent);
 407		tbc = 0;
 408		ctx->sg = sg_next(ctx->sg);
 409		while (ctx->sg && (ctx->bufcnt < 4)) {
 410			len = ctx->sg->length;
 411			if (likely(len > (4 - ctx->bufcnt)))
 412				len = 4 - ctx->bufcnt;
 413			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
 414						 ctx->buffer + ctx->bufcnt, len,
 415					ctx->sent + ctx->bufcnt);
 416			ctx->bufcnt += tbc;
 417			if (tbc >= ctx->sg->length) {
 418				ctx->sg = sg_next(ctx->sg);
 419				tbc = 0;
 420			}
 421		}
 422
 423		ctx->sent += ctx->bufcnt;
 424		ctx->offset = tbc;
 425
 426		if (!wsend)
 427			img_hash_dma_callback(hdev);
 428	} else {
 429		ctx->offset = 0;
 430		ctx->sg = sg_next(ctx->sg);
 431	}
 432}
 433
 434static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
 435{
 436	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 437
 438	if (ctx->flags & DRIVER_FLAGS_SG)
 439		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
 440
 441	return 0;
 442}
 443
 444static int img_hash_process_data(struct img_hash_dev *hdev)
 445{
 446	struct ahash_request *req = hdev->req;
 447	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 448	int err = 0;
 449
 450	ctx->bufcnt = 0;
 451
 452	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
 453		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
 454			req->nbytes);
 455		err = img_hash_write_via_dma(hdev);
 456	} else {
 457		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
 458			req->nbytes);
 459		err = img_hash_write_via_cpu(hdev);
 460	}
 461	return err;
 462}
 463
 464static int img_hash_hw_init(struct img_hash_dev *hdev)
 465{
 466	unsigned long long nbits;
 467	u32 u, l;
 468
 469	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
 470	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
 471	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);
 472
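	/* Program the total message length in bits (high and low words). */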
 473	nbits = (u64)hdev->req->nbytes << 3;
 474	u = nbits >> 32;
 475	l = nbits;
 476	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
 477	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);
 478
 479	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
 480		hdev->flags |= DRIVER_FLAGS_INIT;
 481		hdev->err = 0;
 482	}
 483	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
 484	return 0;
 485}
 486
 487static int img_hash_init(struct ahash_request *req)
 488{
 489	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 490	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 491	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 492
 493	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 494	rctx->fallback_req.base.flags =	req->base.flags
 495		& CRYPTO_TFM_REQ_MAY_SLEEP;
 496
 497	return crypto_ahash_init(&rctx->fallback_req);
 498}
 499
 500static int img_hash_handle_queue(struct img_hash_dev *hdev,
 501				 struct ahash_request *req)
 502{
 503	struct crypto_async_request *async_req, *backlog;
 504	struct img_hash_request_ctx *ctx;
 505	unsigned long flags;
 506	int err = 0, res = 0;
 507
 508	spin_lock_irqsave(&hdev->lock, flags);
 509
 510	if (req)
 511		res = ahash_enqueue_request(&hdev->queue, req);
 512
 513	if (DRIVER_FLAGS_BUSY & hdev->flags) {
 514		spin_unlock_irqrestore(&hdev->lock, flags);
 515		return res;
 516	}
 517
 518	backlog = crypto_get_backlog(&hdev->queue);
 519	async_req = crypto_dequeue_request(&hdev->queue);
 520	if (async_req)
 521		hdev->flags |= DRIVER_FLAGS_BUSY;
 522
 523	spin_unlock_irqrestore(&hdev->lock, flags);
 524
 525	if (!async_req)
 526		return res;
 527
 528	if (backlog)
 529		crypto_request_complete(backlog, -EINPROGRESS);
 530
 531	req = ahash_request_cast(async_req);
 532	hdev->req = req;
 533
 534	ctx = ahash_request_ctx(req);
 535
 536	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
 537		 ctx->op, req->nbytes);
 538
 539	err = img_hash_hw_init(hdev);
 540
 541	if (!err)
 542		err = img_hash_process_data(hdev);
 543
 544	if (err != -EINPROGRESS) {
 545		/* done_task will not finish so do it here */
 546		img_hash_finish_req(req, err);
 547	}
 548	return res;
 549}
 550
 551static int img_hash_update(struct ahash_request *req)
 552{
 553	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 554	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 555	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 556
 557	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 558	rctx->fallback_req.base.flags = req->base.flags
 559		& CRYPTO_TFM_REQ_MAY_SLEEP;
 560	rctx->fallback_req.nbytes = req->nbytes;
 561	rctx->fallback_req.src = req->src;
 562
 563	return crypto_ahash_update(&rctx->fallback_req);
 564}
 565
 566static int img_hash_final(struct ahash_request *req)
 567{
 568	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 569	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 570	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 571
 572	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 573	rctx->fallback_req.base.flags = req->base.flags
 574		& CRYPTO_TFM_REQ_MAY_SLEEP;
 575	rctx->fallback_req.result = req->result;
 576
 577	return crypto_ahash_final(&rctx->fallback_req);
 578}
 579
 580static int img_hash_finup(struct ahash_request *req)
 581{
 582	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 583	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 584	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 585
 586	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 587	rctx->fallback_req.base.flags = req->base.flags
 588		& CRYPTO_TFM_REQ_MAY_SLEEP;
 589	rctx->fallback_req.nbytes = req->nbytes;
 590	rctx->fallback_req.src = req->src;
 591	rctx->fallback_req.result = req->result;
 592
 593	return crypto_ahash_finup(&rctx->fallback_req);
 594}
 595
 596static int img_hash_import(struct ahash_request *req, const void *in)
 597{
 598	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 599	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 600	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 601
 602	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 603	rctx->fallback_req.base.flags = req->base.flags
 604		& CRYPTO_TFM_REQ_MAY_SLEEP;
 605
 606	return crypto_ahash_import(&rctx->fallback_req, in);
 607}
 608
 609static int img_hash_export(struct ahash_request *req, void *out)
 610{
 611	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 612	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 613	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 614
 615	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 616	rctx->fallback_req.base.flags = req->base.flags
 617		& CRYPTO_TFM_REQ_MAY_SLEEP;
 618
 619	return crypto_ahash_export(&rctx->fallback_req, out);
 620}
 621
 622static int img_hash_digest(struct ahash_request *req)
 623{
 624	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 625	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
 626	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 627	struct img_hash_dev *hdev = NULL;
 628	struct img_hash_dev *tmp;
 629	int err;
 630
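	/* On first use, bind this transform to the first registered device. */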
 631	spin_lock(&img_hash.lock);
 632	if (!tctx->hdev) {
 633		list_for_each_entry(tmp, &img_hash.dev_list, list) {
 634			hdev = tmp;
 635			break;
 636		}
 637		tctx->hdev = hdev;
 638
 639	} else {
 640		hdev = tctx->hdev;
 641	}
 642
 643	spin_unlock(&img_hash.lock);
 644	ctx->hdev = hdev;
 645	ctx->flags = 0;
 646	ctx->digsize = crypto_ahash_digestsize(tfm);
 647
 648	switch (ctx->digsize) {
 649	case SHA1_DIGEST_SIZE:
 650		ctx->flags |= DRIVER_FLAGS_SHA1;
 651		break;
 652	case SHA256_DIGEST_SIZE:
 653		ctx->flags |= DRIVER_FLAGS_SHA256;
 654		break;
 655	case SHA224_DIGEST_SIZE:
 656		ctx->flags |= DRIVER_FLAGS_SHA224;
 657		break;
 658	case MD5_DIGEST_SIZE:
 659		ctx->flags |= DRIVER_FLAGS_MD5;
 660		break;
 661	default:
 662		return -EINVAL;
 663	}
 664
 665	ctx->bufcnt = 0;
 666	ctx->offset = 0;
 667	ctx->sent = 0;
 668	ctx->total = req->nbytes;
 669	ctx->sg = req->src;
 670	ctx->sgfirst = req->src;
 671	ctx->nents = sg_nents(ctx->sg);
 672
 673	err = img_hash_handle_queue(tctx->hdev, req);
 674
 675	return err;
 676}
 677
 678static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
 679{
 680	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 681
 682	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
 683					   CRYPTO_ALG_NEED_FALLBACK);
 684	if (IS_ERR(ctx->fallback)) {
 685		pr_err("img_hash: Could not load fallback driver.\n");
 686		return PTR_ERR(ctx->fallback);
 687	}
 688	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 689				 sizeof(struct img_hash_request_ctx) +
 690				 crypto_ahash_reqsize(ctx->fallback) +
 691				 IMG_HASH_DMA_THRESHOLD);
 692
 693	return 0;
 694}
 695
 696static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
 697{
 698	return img_hash_cra_init(tfm, "md5-generic");
 699}
 700
 701static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
 702{
 703	return img_hash_cra_init(tfm, "sha1-generic");
 704}
 705
 706static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
 707{
 708	return img_hash_cra_init(tfm, "sha224-generic");
 709}
 710
 711static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
 712{
 713	return img_hash_cra_init(tfm, "sha256-generic");
 714}
 715
 716static void img_hash_cra_exit(struct crypto_tfm *tfm)
 717{
 718	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
 719
 720	crypto_free_ahash(tctx->fallback);
 721}
 722
 723static irqreturn_t img_irq_handler(int irq, void *dev_id)
 724{
 725	struct img_hash_dev *hdev = dev_id;
 726	u32 reg;
 727
 728	reg = img_hash_read(hdev, CR_INTSTAT);
 729	img_hash_write(hdev, CR_INTCLEAR, reg);
 730
 731	if (reg & CR_INT_NEW_RESULTS_SET) {
 732		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
 733		if (DRIVER_FLAGS_BUSY & hdev->flags) {
 734			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
 735			if (!(DRIVER_FLAGS_CPU & hdev->flags))
 736				hdev->flags |= DRIVER_FLAGS_DMA_READY;
 737			tasklet_schedule(&hdev->done_task);
 738		} else {
 739			dev_warn(hdev->dev,
 740				 "HASH interrupt when no active requests.\n");
 741		}
 742	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
 743		dev_warn(hdev->dev,
 744			 "IRQ triggered before the hash had completed\n");
 745	} else if (reg & CR_INT_RESULT_READ_ERR) {
 746		dev_warn(hdev->dev,
 747			 "Attempt to read from an empty result queue\n");
 748	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
 749		dev_warn(hdev->dev,
 750			 "Data written before the hardware was configured\n");
 751	}
 752	return IRQ_HANDLED;
 753}
 754
 755static struct ahash_alg img_algs[] = {
 756	{
 757		.init = img_hash_init,
 758		.update = img_hash_update,
 759		.final = img_hash_final,
 760		.finup = img_hash_finup,
 761		.export = img_hash_export,
 762		.import = img_hash_import,
 763		.digest = img_hash_digest,
 764		.halg = {
 765			.digestsize = MD5_DIGEST_SIZE,
 766			.statesize = sizeof(struct md5_state),
 767			.base = {
 768				.cra_name = "md5",
 769				.cra_driver_name = "img-md5",
 770				.cra_priority = 300,
 771				.cra_flags =
 772				CRYPTO_ALG_ASYNC |
 773				CRYPTO_ALG_NEED_FALLBACK,
 774				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 775				.cra_ctxsize = sizeof(struct img_hash_ctx),
 776				.cra_init = img_hash_cra_md5_init,
 777				.cra_exit = img_hash_cra_exit,
 778				.cra_module = THIS_MODULE,
 779			}
 780		}
 781	},
 782	{
 783		.init = img_hash_init,
 784		.update = img_hash_update,
 785		.final = img_hash_final,
 786		.finup = img_hash_finup,
 787		.export = img_hash_export,
 788		.import = img_hash_import,
 789		.digest = img_hash_digest,
 790		.halg = {
 791			.digestsize = SHA1_DIGEST_SIZE,
 792			.statesize = sizeof(struct sha1_state),
 793			.base = {
 794				.cra_name = "sha1",
 795				.cra_driver_name = "img-sha1",
 796				.cra_priority = 300,
 797				.cra_flags =
 798				CRYPTO_ALG_ASYNC |
 799				CRYPTO_ALG_NEED_FALLBACK,
 800				.cra_blocksize = SHA1_BLOCK_SIZE,
 801				.cra_ctxsize = sizeof(struct img_hash_ctx),
 802				.cra_init = img_hash_cra_sha1_init,
 803				.cra_exit = img_hash_cra_exit,
 804				.cra_module = THIS_MODULE,
 805			}
 806		}
 807	},
 808	{
 809		.init = img_hash_init,
 810		.update = img_hash_update,
 811		.final = img_hash_final,
 812		.finup = img_hash_finup,
 813		.export = img_hash_export,
 814		.import = img_hash_import,
 815		.digest = img_hash_digest,
 816		.halg = {
 817			.digestsize = SHA224_DIGEST_SIZE,
 818			.statesize = sizeof(struct sha256_state),
 819			.base = {
 820				.cra_name = "sha224",
 821				.cra_driver_name = "img-sha224",
 822				.cra_priority = 300,
 823				.cra_flags =
 824				CRYPTO_ALG_ASYNC |
 825				CRYPTO_ALG_NEED_FALLBACK,
 826				.cra_blocksize = SHA224_BLOCK_SIZE,
 827				.cra_ctxsize = sizeof(struct img_hash_ctx),
 828				.cra_init = img_hash_cra_sha224_init,
 829				.cra_exit = img_hash_cra_exit,
 830				.cra_module = THIS_MODULE,
 831			}
 832		}
 833	},
 834	{
 835		.init = img_hash_init,
 836		.update = img_hash_update,
 837		.final = img_hash_final,
 838		.finup = img_hash_finup,
 839		.export = img_hash_export,
 840		.import = img_hash_import,
 841		.digest = img_hash_digest,
 842		.halg = {
 843			.digestsize = SHA256_DIGEST_SIZE,
 844			.statesize = sizeof(struct sha256_state),
 845			.base = {
 846				.cra_name = "sha256",
 847				.cra_driver_name = "img-sha256",
 848				.cra_priority = 300,
 849				.cra_flags =
 850				CRYPTO_ALG_ASYNC |
 851				CRYPTO_ALG_NEED_FALLBACK,
 852				.cra_blocksize = SHA256_BLOCK_SIZE,
 853				.cra_ctxsize = sizeof(struct img_hash_ctx),
 854				.cra_init = img_hash_cra_sha256_init,
 855				.cra_exit = img_hash_cra_exit,
 856				.cra_module = THIS_MODULE,
 857			}
 858		}
 859	}
 860};
 861
 862static int img_register_algs(struct img_hash_dev *hdev)
 863{
 864	int i, err;
 865
 866	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
 867		err = crypto_register_ahash(&img_algs[i]);
 868		if (err)
 869			goto err_reg;
 870	}
 871	return 0;
 872
 873err_reg:
 874	for (; i--; )
 875		crypto_unregister_ahash(&img_algs[i]);
 876
 877	return err;
 878}
 879
 880static int img_unregister_algs(struct img_hash_dev *hdev)
 881{
 882	int i;
 883
 884	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
 885		crypto_unregister_ahash(&img_algs[i]);
 886	return 0;
 887}
 888
 889static void img_hash_done_task(unsigned long data)
 890{
 891	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
 892	int err = 0;
 893
 894	if (hdev->err == -EINVAL) {
 895		err = hdev->err;
 896		goto finish;
 897	}
 898
 899	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
 900		img_hash_handle_queue(hdev, NULL);
 901		return;
 902	}
 903
 904	if (DRIVER_FLAGS_CPU & hdev->flags) {
 905		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
 906			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
 907			goto finish;
 908		}
 909	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
 910		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
 911			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
 912			img_hash_write_via_dma_stop(hdev);
 913			if (hdev->err) {
 914				err = hdev->err;
 915				goto finish;
 916			}
 917		}
 918		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
 919			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
 920					DRIVER_FLAGS_OUTPUT_READY);
 921			goto finish;
 922		}
 923	}
 924	return;
 925
 926finish:
 927	img_hash_finish_req(hdev->req, err);
 928}
 929
 930static const struct of_device_id img_hash_match[] __maybe_unused = {
 931	{ .compatible = "img,hash-accelerator" },
 932	{}
 933};
 934MODULE_DEVICE_TABLE(of, img_hash_match);
 935
 936static int img_hash_probe(struct platform_device *pdev)
 937{
 938	struct img_hash_dev *hdev;
 939	struct device *dev = &pdev->dev;
 940	struct resource *hash_res;
 941	int	irq;
 942	int err;
 943
 944	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
 945	if (hdev == NULL)
 946		return -ENOMEM;
 947
 948	spin_lock_init(&hdev->lock);
 949
 950	hdev->dev = dev;
 951
 952	platform_set_drvdata(pdev, hdev);
 953
 954	INIT_LIST_HEAD(&hdev->list);
 955
 956	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
 957	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);
 958
 959	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);
 960
 961	/* Register bank */
 962	hdev->io_base = devm_platform_ioremap_resource(pdev, 0);
 963	if (IS_ERR(hdev->io_base)) {
 964		err = PTR_ERR(hdev->io_base);
 965		goto res_err;
 966	}
 967
 968	/* Write port (DMA or CPU) */
 969	hdev->cpu_addr = devm_platform_get_and_ioremap_resource(pdev, 1, &hash_res);
 970	if (IS_ERR(hdev->cpu_addr)) {
 971		err = PTR_ERR(hdev->cpu_addr);
 972		goto res_err;
 973	}
 974	hdev->bus_addr = hash_res->start;
 975
 976	irq = platform_get_irq(pdev, 0);
 977	if (irq < 0) {
 978		err = irq;
 979		goto res_err;
 980	}
 981
 982	err = devm_request_irq(dev, irq, img_irq_handler, 0,
 983			       dev_name(dev), hdev);
 984	if (err) {
 985		dev_err(dev, "unable to request irq\n");
 986		goto res_err;
 987	}
 988	dev_dbg(dev, "using IRQ channel %d\n", irq);
 989
 990	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
 991	if (IS_ERR(hdev->hash_clk)) {
 992		dev_err(dev, "clock initialization failed.\n");
 993		err = PTR_ERR(hdev->hash_clk);
 994		goto res_err;
 995	}
 996
 997	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
 998	if (IS_ERR(hdev->sys_clk)) {
 999		dev_err(dev, "clock initialization failed.\n");
1000		err = PTR_ERR(hdev->sys_clk);
1001		goto res_err;
1002	}
1003
1004	err = clk_prepare_enable(hdev->hash_clk);
1005	if (err)
1006		goto res_err;
1007
1008	err = clk_prepare_enable(hdev->sys_clk);
1009	if (err)
1010		goto clk_err;
1011
1012	err = img_hash_dma_init(hdev);
1013	if (err)
1014		goto dma_err;
1015
1016	dev_dbg(dev, "using %s for DMA transfers\n",
1017		dma_chan_name(hdev->dma_lch));
1018
1019	spin_lock(&img_hash.lock);
1020	list_add_tail(&hdev->list, &img_hash.dev_list);
1021	spin_unlock(&img_hash.lock);
1022
1023	err = img_register_algs(hdev);
1024	if (err)
1025		goto err_algs;
1026	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
1027
1028	return 0;
1029
1030err_algs:
1031	spin_lock(&img_hash.lock);
1032	list_del(&hdev->list);
1033	spin_unlock(&img_hash.lock);
1034	dma_release_channel(hdev->dma_lch);
1035dma_err:
1036	clk_disable_unprepare(hdev->sys_clk);
1037clk_err:
1038	clk_disable_unprepare(hdev->hash_clk);
1039res_err:
1040	tasklet_kill(&hdev->done_task);
1041	tasklet_kill(&hdev->dma_task);
1042
1043	return err;
1044}
1045
1046static void img_hash_remove(struct platform_device *pdev)
1047{
1048	struct img_hash_dev *hdev;
1049
1050	hdev = platform_get_drvdata(pdev);
1051	spin_lock(&img_hash.lock);
1052	list_del(&hdev->list);
1053	spin_unlock(&img_hash.lock);
1054
1055	img_unregister_algs(hdev);
1056
1057	tasklet_kill(&hdev->done_task);
1058	tasklet_kill(&hdev->dma_task);
1059
1060	dma_release_channel(hdev->dma_lch);
1061
1062	clk_disable_unprepare(hdev->hash_clk);
1063	clk_disable_unprepare(hdev->sys_clk);
1064}
1065
1066#ifdef CONFIG_PM_SLEEP
1067static int img_hash_suspend(struct device *dev)
1068{
1069	struct img_hash_dev *hdev = dev_get_drvdata(dev);
1070
1071	clk_disable_unprepare(hdev->hash_clk);
1072	clk_disable_unprepare(hdev->sys_clk);
1073
1074	return 0;
1075}
1076
1077static int img_hash_resume(struct device *dev)
1078{
1079	struct img_hash_dev *hdev = dev_get_drvdata(dev);
1080	int ret;
1081
1082	ret = clk_prepare_enable(hdev->hash_clk);
1083	if (ret)
1084		return ret;
1085
1086	ret = clk_prepare_enable(hdev->sys_clk);
1087	if (ret) {
1088		clk_disable_unprepare(hdev->hash_clk);
1089		return ret;
1090	}
1091
1092	return 0;
1093}
1094#endif /* CONFIG_PM_SLEEP */
1095
1096static const struct dev_pm_ops img_hash_pm_ops = {
1097	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
1098};
1099
1100static struct platform_driver img_hash_driver = {
1101	.probe		= img_hash_probe,
1102	.remove_new	= img_hash_remove,
1103	.driver		= {
1104		.name	= "img-hash-accelerator",
1105		.pm	= &img_hash_pm_ops,
1106		.of_match_table	= img_hash_match,
1107	}
1108};
1109module_platform_driver(img_hash_driver);
1110
1111MODULE_LICENSE("GPL v2");
1112MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
1113MODULE_AUTHOR("Will Thomas.");
1114MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");
v4.10.11
 
   1/*
   2 * Copyright (c) 2014 Imagination Technologies
   3 * Authors:  Will Thomas, James Hartley
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License version 2 as published
   7 * by the Free Software Foundation.
   8 *
   9 *	Interface structure taken from omap-sham driver
  10 */
  11
  12#include <linux/clk.h>
 
  13#include <linux/dmaengine.h>
  14#include <linux/interrupt.h>
  15#include <linux/io.h>
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/of_device.h>
  19#include <linux/platform_device.h>
  20#include <linux/scatterlist.h>
  21
  22#include <crypto/internal/hash.h>
  23#include <crypto/md5.h>
  24#include <crypto/sha.h>
 
  25
  26#define CR_RESET			0
  27#define CR_RESET_SET			1
  28#define CR_RESET_UNSET			0
  29
  30#define CR_MESSAGE_LENGTH_H		0x4
  31#define CR_MESSAGE_LENGTH_L		0x8
  32
  33#define CR_CONTROL			0xc
  34#define CR_CONTROL_BYTE_ORDER_3210	0
  35#define CR_CONTROL_BYTE_ORDER_0123	1
  36#define CR_CONTROL_BYTE_ORDER_2310	2
  37#define CR_CONTROL_BYTE_ORDER_1032	3
  38#define CR_CONTROL_BYTE_ORDER_SHIFT	8
  39#define CR_CONTROL_ALGO_MD5	0
  40#define CR_CONTROL_ALGO_SHA1	1
  41#define CR_CONTROL_ALGO_SHA224	2
  42#define CR_CONTROL_ALGO_SHA256	3
  43
  44#define CR_INTSTAT			0x10
  45#define CR_INTENAB			0x14
  46#define CR_INTCLEAR			0x18
  47#define CR_INT_RESULTS_AVAILABLE	BIT(0)
  48#define CR_INT_NEW_RESULTS_SET		BIT(1)
  49#define CR_INT_RESULT_READ_ERR		BIT(2)
  50#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
  51#define CR_INT_STATUS			BIT(8)
  52
  53#define CR_RESULT_QUEUE		0x1c
  54#define CR_RSD0				0x40
  55#define CR_CORE_REV			0x50
  56#define CR_CORE_DES1		0x60
  57#define CR_CORE_DES2		0x70
  58
  59#define DRIVER_FLAGS_BUSY		BIT(0)
  60#define DRIVER_FLAGS_FINAL		BIT(1)
  61#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
  62#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
  63#define DRIVER_FLAGS_INIT		BIT(4)
  64#define DRIVER_FLAGS_CPU		BIT(5)
  65#define DRIVER_FLAGS_DMA_READY		BIT(6)
  66#define DRIVER_FLAGS_ERROR		BIT(7)
  67#define DRIVER_FLAGS_SG			BIT(8)
  68#define DRIVER_FLAGS_SHA1		BIT(18)
  69#define DRIVER_FLAGS_SHA224		BIT(19)
  70#define DRIVER_FLAGS_SHA256		BIT(20)
  71#define DRIVER_FLAGS_MD5		BIT(21)
  72
  73#define IMG_HASH_QUEUE_LENGTH		20
  74#define IMG_HASH_DMA_BURST		4
  75#define IMG_HASH_DMA_THRESHOLD		64
  76
  77#ifdef __LITTLE_ENDIAN
  78#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
  79#else
  80#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
  81#endif
  82
  83struct img_hash_dev;
  84
  85struct img_hash_request_ctx {
  86	struct img_hash_dev	*hdev;
  87	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
  88	unsigned long		flags;
  89	size_t			digsize;
  90
  91	dma_addr_t		dma_addr;
  92	size_t			dma_ct;
  93
  94	/* sg root */
  95	struct scatterlist	*sgfirst;
  96	/* walk state */
  97	struct scatterlist	*sg;
  98	size_t			nents;
  99	size_t			offset;
 100	unsigned int		total;
 101	size_t			sent;
 102
 103	unsigned long		op;
 104
 105	size_t			bufcnt;
 106	struct ahash_request	fallback_req;
 107
 108	/* Zero length buffer must remain last member of struct */
 109	u8 buffer[0] __aligned(sizeof(u32));
 110};
 111
 112struct img_hash_ctx {
 113	struct img_hash_dev	*hdev;
 114	unsigned long		flags;
 115	struct crypto_ahash	*fallback;
 116};
 117
 118struct img_hash_dev {
 119	struct list_head	list;
 120	struct device		*dev;
 121	struct clk		*hash_clk;
 122	struct clk		*sys_clk;
 123	void __iomem		*io_base;
 124
 125	phys_addr_t		bus_addr;
 126	void __iomem		*cpu_addr;
 127
 128	spinlock_t		lock;
 129	int			err;
 130	struct tasklet_struct	done_task;
 131	struct tasklet_struct	dma_task;
 132
 133	unsigned long		flags;
 134	struct crypto_queue	queue;
 135	struct ahash_request	*req;
 136
 137	struct dma_chan		*dma_lch;
 138};
 139
 140struct img_hash_drv {
 141	struct list_head dev_list;
 142	spinlock_t lock;
 143};
 144
 145static struct img_hash_drv img_hash = {
 146	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
 147	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
 148};
 149
 150static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
 151{
 152	return readl_relaxed(hdev->io_base + offset);
 153}
 154
 155static inline void img_hash_write(struct img_hash_dev *hdev,
 156				  u32 offset, u32 value)
 157{
 158	writel_relaxed(value, hdev->io_base + offset);
 159}
 160
 161static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
 162{
 163	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
 164}
 165
 166static void img_hash_start(struct img_hash_dev *hdev, bool dma)
 167{
 168	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 169	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;
 170
 171	if (ctx->flags & DRIVER_FLAGS_MD5)
 172		cr |= CR_CONTROL_ALGO_MD5;
 173	else if (ctx->flags & DRIVER_FLAGS_SHA1)
 174		cr |= CR_CONTROL_ALGO_SHA1;
 175	else if (ctx->flags & DRIVER_FLAGS_SHA224)
 176		cr |= CR_CONTROL_ALGO_SHA224;
 177	else if (ctx->flags & DRIVER_FLAGS_SHA256)
 178		cr |= CR_CONTROL_ALGO_SHA256;
 179	dev_dbg(hdev->dev, "Starting hash process\n");
 180	img_hash_write(hdev, CR_CONTROL, cr);
 181
 182	/*
 183	 * The hardware block requires two cycles between writing the control
 184	 * register and writing the first word of data in non DMA mode, to
 185	 * ensure the first data write is not grouped in burst with the control
 186	 * register write a read is issued to 'flush' the bus.
 187	 */
 188	if (!dma)
 189		img_hash_read(hdev, CR_CONTROL);
 190}
 191
 192static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
 193			     size_t length, int final)
 194{
 195	u32 count, len32;
 196	const u32 *buffer = (const u32 *)buf;
 197
 198	dev_dbg(hdev->dev, "xmit_cpu:  length: %zu bytes\n", length);
 199
 200	if (final)
 201		hdev->flags |= DRIVER_FLAGS_FINAL;
 202
 203	len32 = DIV_ROUND_UP(length, sizeof(u32));
 204
 205	for (count = 0; count < len32; count++)
 206		writel_relaxed(buffer[count], hdev->cpu_addr);
 207
 208	return -EINPROGRESS;
 209}
 210
 211static void img_hash_dma_callback(void *data)
 212{
 213	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
 214	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 215
 216	if (ctx->bufcnt) {
 217		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
 218		ctx->bufcnt = 0;
 219	}
 220	if (ctx->sg)
 221		tasklet_schedule(&hdev->dma_task);
 222}
 223
 224static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
 225{
 226	struct dma_async_tx_descriptor *desc;
 227	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 228
 229	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
 230	if (ctx->dma_ct == 0) {
 231		dev_err(hdev->dev, "Invalid DMA sg\n");
 232		hdev->err = -EINVAL;
 233		return -EINVAL;
 234	}
 235
 236	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
 237				       sg,
 238				       ctx->dma_ct,
 239				       DMA_MEM_TO_DEV,
 240				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 241	if (!desc) {
 242		dev_err(hdev->dev, "Null DMA descriptor\n");
 243		hdev->err = -EINVAL;
 244		dma_unmap_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
 245		return -EINVAL;
 246	}
 247	desc->callback = img_hash_dma_callback;
 248	desc->callback_param = hdev;
 249	dmaengine_submit(desc);
 250	dma_async_issue_pending(hdev->dma_lch);
 251
 252	return 0;
 253}
 254
 255static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
 256{
 257	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 258
 259	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
 260					ctx->buffer, hdev->req->nbytes);
 261
 262	ctx->total = hdev->req->nbytes;
 263	ctx->bufcnt = 0;
 264
 265	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);
 266
 267	img_hash_start(hdev, false);
 268
 269	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
 270}
 271
 272static int img_hash_finish(struct ahash_request *req)
 273{
 274	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 275
 276	if (!req->result)
 277		return -EINVAL;
 278
 279	memcpy(req->result, ctx->digest, ctx->digsize);
 280
 281	return 0;
 282}
 283
 284static void img_hash_copy_hash(struct ahash_request *req)
 285{
 286	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 287	u32 *hash = (u32 *)ctx->digest;
 288	int i;
 289
 290	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
 291		hash[i] = img_hash_read_result_queue(ctx->hdev);
 292}
 293
 294static void img_hash_finish_req(struct ahash_request *req, int err)
 295{
 296	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 297	struct img_hash_dev *hdev =  ctx->hdev;
 298
 299	if (!err) {
 300		img_hash_copy_hash(req);
 301		if (DRIVER_FLAGS_FINAL & hdev->flags)
 302			err = img_hash_finish(req);
 303	} else {
 304		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
 305		ctx->flags |= DRIVER_FLAGS_ERROR;
 306	}
 307
 308	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
 309		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
 310
 311	if (req->base.complete)
 312		req->base.complete(&req->base, err);
 313}
 314
 315static int img_hash_write_via_dma(struct img_hash_dev *hdev)
 316{
 317	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 318
 319	img_hash_start(hdev, true);
 320
 321	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);
 322
 323	if (!ctx->total)
 324		hdev->flags |= DRIVER_FLAGS_FINAL;
 325
 326	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;
 327
 328	tasklet_schedule(&hdev->dma_task);
 329
 330	return -EINPROGRESS;
 331}
 332
 333static int img_hash_dma_init(struct img_hash_dev *hdev)
 334{
 335	struct dma_slave_config dma_conf;
 336	int err = -EINVAL;
 337
 338	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
 339	if (!hdev->dma_lch) {
 340		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
 341		return -EBUSY;
 342	}
 343	dma_conf.direction = DMA_MEM_TO_DEV;
 344	dma_conf.dst_addr = hdev->bus_addr;
 345	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 346	dma_conf.dst_maxburst = IMG_HASH_DMA_BURST;
 347	dma_conf.device_fc = false;
 348
 349	err = dmaengine_slave_config(hdev->dma_lch,  &dma_conf);
 350	if (err) {
 351		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
 352		dma_release_channel(hdev->dma_lch);
 353		return err;
 354	}
 355
 356	return 0;
 357}
 358
 359static void img_hash_dma_task(unsigned long d)
 360{
 361	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
 362	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 363	u8 *addr;
 364	size_t nbytes, bleft, wsend, len, tbc;
 365	struct scatterlist tsg;
 366
 367	if (!hdev->req || !ctx->sg)
 
 
 
 
 368		return;
 369
 370	addr = sg_virt(ctx->sg);
 371	nbytes = ctx->sg->length - ctx->offset;
 372
 373	/*
 374	 * The hash accelerator does not support a data valid mask. This means
 375	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
 376	 * padding bytes in the last word written by that dma would erroneously
 377	 * be included in the hash. To avoid this we round down the transfer,
 378	 * and add the excess to the start of the next dma. It does not matter
 379	 * that the final dma may not be a multiple of 4 bytes as the hashing
 380	 * block is programmed to accept the correct number of bytes.
 381	 */
 382
 383	bleft = nbytes % 4;
 384	wsend = (nbytes / 4);
 385
 386	if (wsend) {
 387		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
 388		if (img_hash_xmit_dma(hdev, &tsg)) {
 389			dev_err(hdev->dev, "DMA failed, falling back to CPU");
 390			ctx->flags |= DRIVER_FLAGS_CPU;
 391			hdev->err = 0;
 392			img_hash_xmit_cpu(hdev, addr + ctx->offset,
 393					  wsend * 4, 0);
 394			ctx->sent += wsend * 4;
 395			wsend = 0;
 396		} else {
 397			ctx->sent += wsend * 4;
 398		}
 399	}
 400
 401	if (bleft) {
 402		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
 403						 ctx->buffer, bleft, ctx->sent);
 404		tbc = 0;
 405		ctx->sg = sg_next(ctx->sg);
 406		while (ctx->sg && (ctx->bufcnt < 4)) {
 407			len = ctx->sg->length;
 408			if (likely(len > (4 - ctx->bufcnt)))
 409				len = 4 - ctx->bufcnt;
 410			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
 411						 ctx->buffer + ctx->bufcnt, len,
 412					ctx->sent + ctx->bufcnt);
 413			ctx->bufcnt += tbc;
 414			if (tbc >= ctx->sg->length) {
 415				ctx->sg = sg_next(ctx->sg);
 416				tbc = 0;
 417			}
 418		}
 419
 420		ctx->sent += ctx->bufcnt;
 421		ctx->offset = tbc;
 422
 423		if (!wsend)
 424			img_hash_dma_callback(hdev);
 425	} else {
 426		ctx->offset = 0;
 427		ctx->sg = sg_next(ctx->sg);
 428	}
 429}
 430
 431static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
 432{
 433	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
 434
 435	if (ctx->flags & DRIVER_FLAGS_SG)
 436		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);
 437
 438	return 0;
 439}
 440
 441static int img_hash_process_data(struct img_hash_dev *hdev)
 442{
 443	struct ahash_request *req = hdev->req;
 444	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 445	int err = 0;
 446
 447	ctx->bufcnt = 0;
 448
 449	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
 450		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
 451			req->nbytes);
 452		err = img_hash_write_via_dma(hdev);
 453	} else {
 454		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
 455			req->nbytes);
 456		err = img_hash_write_via_cpu(hdev);
 457	}
 458	return err;
 459}
 460
 461static int img_hash_hw_init(struct img_hash_dev *hdev)
 462{
 463	unsigned long long nbits;
 464	u32 u, l;
 465
 466	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
 467	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
 468	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);
 469
 470	nbits = (u64)hdev->req->nbytes << 3;
 471	u = nbits >> 32;
 472	l = nbits;
 473	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
 474	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);
 475
 476	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
 477		hdev->flags |= DRIVER_FLAGS_INIT;
 478		hdev->err = 0;
 479	}
 480	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
 481	return 0;
 482}
 483
 484static int img_hash_init(struct ahash_request *req)
 485{
 486	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 487	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 488	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 489
 490	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 491	rctx->fallback_req.base.flags =	req->base.flags
 492		& CRYPTO_TFM_REQ_MAY_SLEEP;
 493
 494	return crypto_ahash_init(&rctx->fallback_req);
 495}
 496
 497static int img_hash_handle_queue(struct img_hash_dev *hdev,
 498				 struct ahash_request *req)
 499{
 500	struct crypto_async_request *async_req, *backlog;
 501	struct img_hash_request_ctx *ctx;
 502	unsigned long flags;
 503	int err = 0, res = 0;
 504
 505	spin_lock_irqsave(&hdev->lock, flags);
 506
 507	if (req)
 508		res = ahash_enqueue_request(&hdev->queue, req);
 509
 510	if (DRIVER_FLAGS_BUSY & hdev->flags) {
 511		spin_unlock_irqrestore(&hdev->lock, flags);
 512		return res;
 513	}
 514
 515	backlog = crypto_get_backlog(&hdev->queue);
 516	async_req = crypto_dequeue_request(&hdev->queue);
 517	if (async_req)
 518		hdev->flags |= DRIVER_FLAGS_BUSY;
 519
 520	spin_unlock_irqrestore(&hdev->lock, flags);
 521
 522	if (!async_req)
 523		return res;
 524
 525	if (backlog)
 526		backlog->complete(backlog, -EINPROGRESS);
 527
 528	req = ahash_request_cast(async_req);
 529	hdev->req = req;
 530
 531	ctx = ahash_request_ctx(req);
 532
 533	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
 534		 ctx->op, req->nbytes);
 535
 536	err = img_hash_hw_init(hdev);
 537
 538	if (!err)
 539		err = img_hash_process_data(hdev);
 540
 541	if (err != -EINPROGRESS) {
 542		/* done_task will not finish so do it here */
 543		img_hash_finish_req(req, err);
 544	}
 545	return res;
 546}
 547
 548static int img_hash_update(struct ahash_request *req)
 549{
 550	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 551	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 552	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 553
 554	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 555	rctx->fallback_req.base.flags = req->base.flags
 556		& CRYPTO_TFM_REQ_MAY_SLEEP;
 557	rctx->fallback_req.nbytes = req->nbytes;
 558	rctx->fallback_req.src = req->src;
 559
 560	return crypto_ahash_update(&rctx->fallback_req);
 561}
 562
 563static int img_hash_final(struct ahash_request *req)
 564{
 565	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 566	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 567	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 568
 569	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 570	rctx->fallback_req.base.flags = req->base.flags
 571		& CRYPTO_TFM_REQ_MAY_SLEEP;
 572	rctx->fallback_req.result = req->result;
 573
 574	return crypto_ahash_final(&rctx->fallback_req);
 575}
 576
 577static int img_hash_finup(struct ahash_request *req)
 578{
 579	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 580	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 581	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 582
 583	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 584	rctx->fallback_req.base.flags = req->base.flags
 585		& CRYPTO_TFM_REQ_MAY_SLEEP;
 586	rctx->fallback_req.nbytes = req->nbytes;
 587	rctx->fallback_req.src = req->src;
 588	rctx->fallback_req.result = req->result;
 589
 590	return crypto_ahash_finup(&rctx->fallback_req);
 591}
 592
 593static int img_hash_import(struct ahash_request *req, const void *in)
 594{
 595	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 596	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 597	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 598
 599	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 600	rctx->fallback_req.base.flags = req->base.flags
 601		& CRYPTO_TFM_REQ_MAY_SLEEP;
 602
 603	return crypto_ahash_import(&rctx->fallback_req, in);
 604}
 605
 606static int img_hash_export(struct ahash_request *req, void *out)
 607{
 608	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
 609	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 610	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);
 611
 612	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 613	rctx->fallback_req.base.flags = req->base.flags
 614		& CRYPTO_TFM_REQ_MAY_SLEEP;
 615
 616	return crypto_ahash_export(&rctx->fallback_req, out);
 617}
 618
 619static int img_hash_digest(struct ahash_request *req)
 620{
 621	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 622	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
 623	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
 624	struct img_hash_dev *hdev = NULL;
 625	struct img_hash_dev *tmp;
 626	int err;
 627
 628	spin_lock(&img_hash.lock);
 629	if (!tctx->hdev) {
 630		list_for_each_entry(tmp, &img_hash.dev_list, list) {
 631			hdev = tmp;
 632			break;
 633		}
 634		tctx->hdev = hdev;
 635
 636	} else {
 637		hdev = tctx->hdev;
 638	}
 639
 640	spin_unlock(&img_hash.lock);
 641	ctx->hdev = hdev;
 642	ctx->flags = 0;
 643	ctx->digsize = crypto_ahash_digestsize(tfm);
 644
 645	switch (ctx->digsize) {
 646	case SHA1_DIGEST_SIZE:
 647		ctx->flags |= DRIVER_FLAGS_SHA1;
 648		break;
 649	case SHA256_DIGEST_SIZE:
 650		ctx->flags |= DRIVER_FLAGS_SHA256;
 651		break;
 652	case SHA224_DIGEST_SIZE:
 653		ctx->flags |= DRIVER_FLAGS_SHA224;
 654		break;
 655	case MD5_DIGEST_SIZE:
 656		ctx->flags |= DRIVER_FLAGS_MD5;
 657		break;
 658	default:
 659		return -EINVAL;
 660	}
 661
 662	ctx->bufcnt = 0;
 663	ctx->offset = 0;
 664	ctx->sent = 0;
 665	ctx->total = req->nbytes;
 666	ctx->sg = req->src;
 667	ctx->sgfirst = req->src;
 668	ctx->nents = sg_nents(ctx->sg);
 669
 670	err = img_hash_handle_queue(tctx->hdev, req);
 671
 672	return err;
 673}
 674
 675static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
 676{
 677	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 678	int err = -ENOMEM;
 679
 680	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
 681					   CRYPTO_ALG_NEED_FALLBACK);
 682	if (IS_ERR(ctx->fallback)) {
 683		pr_err("img_hash: Could not load fallback driver.\n");
 684		err = PTR_ERR(ctx->fallback);
 685		goto err;
 686	}
 687	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 688				 sizeof(struct img_hash_request_ctx) +
 689				 crypto_ahash_reqsize(ctx->fallback) +
 690				 IMG_HASH_DMA_THRESHOLD);
 691
 692	return 0;
 693
 694err:
 695	return err;
 696}
 697
 698static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
 699{
 700	return img_hash_cra_init(tfm, "md5-generic");
 701}
 702
 703static int img_hash_cra_sha1_init(struct crypto_tfm *tfm)
 704{
 705	return img_hash_cra_init(tfm, "sha1-generic");
 706}
 707
 708static int img_hash_cra_sha224_init(struct crypto_tfm *tfm)
 709{
 710	return img_hash_cra_init(tfm, "sha224-generic");
 711}
 712
 713static int img_hash_cra_sha256_init(struct crypto_tfm *tfm)
 714{
 715	return img_hash_cra_init(tfm, "sha256-generic");
 716}
 717
 718static void img_hash_cra_exit(struct crypto_tfm *tfm)
 719{
 720	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);
 721
 722	crypto_free_ahash(tctx->fallback);
 723}
 724
 725static irqreturn_t img_irq_handler(int irq, void *dev_id)
 726{
 727	struct img_hash_dev *hdev = dev_id;
 728	u32 reg;
 729
 730	reg = img_hash_read(hdev, CR_INTSTAT);
 731	img_hash_write(hdev, CR_INTCLEAR, reg);
 732
 733	if (reg & CR_INT_NEW_RESULTS_SET) {
 734		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
 735		if (DRIVER_FLAGS_BUSY & hdev->flags) {
 736			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
 737			if (!(DRIVER_FLAGS_CPU & hdev->flags))
 738				hdev->flags |= DRIVER_FLAGS_DMA_READY;
 739			tasklet_schedule(&hdev->done_task);
 740		} else {
 741			dev_warn(hdev->dev,
 742				 "HASH interrupt when no active requests.\n");
 743		}
 744	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
 745		dev_warn(hdev->dev,
 746			 "IRQ triggered before the hash had completed\n");
 747	} else if (reg & CR_INT_RESULT_READ_ERR) {
 748		dev_warn(hdev->dev,
 749			 "Attempt to read from an empty result queue\n");
 750	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
 751		dev_warn(hdev->dev,
 752			 "Data written before the hardware was configured\n");
 753	}
 754	return IRQ_HANDLED;
 755}
 756
 757static struct ahash_alg img_algs[] = {
 758	{
 759		.init = img_hash_init,
 760		.update = img_hash_update,
 761		.final = img_hash_final,
 762		.finup = img_hash_finup,
 763		.export = img_hash_export,
 764		.import = img_hash_import,
 765		.digest = img_hash_digest,
 766		.halg = {
 767			.digestsize = MD5_DIGEST_SIZE,
 768			.statesize = sizeof(struct md5_state),
 769			.base = {
 770				.cra_name = "md5",
 771				.cra_driver_name = "img-md5",
 772				.cra_priority = 300,
 773				.cra_flags =
 774				CRYPTO_ALG_ASYNC |
 775				CRYPTO_ALG_NEED_FALLBACK,
 776				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
 777				.cra_ctxsize = sizeof(struct img_hash_ctx),
 778				.cra_init = img_hash_cra_md5_init,
 779				.cra_exit = img_hash_cra_exit,
 780				.cra_module = THIS_MODULE,
 781			}
 782		}
 783	},
 784	{
 785		.init = img_hash_init,
 786		.update = img_hash_update,
 787		.final = img_hash_final,
 788		.finup = img_hash_finup,
 789		.export = img_hash_export,
 790		.import = img_hash_import,
 791		.digest = img_hash_digest,
 792		.halg = {
 793			.digestsize = SHA1_DIGEST_SIZE,
 794			.statesize = sizeof(struct sha1_state),
 795			.base = {
 796				.cra_name = "sha1",
 797				.cra_driver_name = "img-sha1",
 798				.cra_priority = 300,
 799				.cra_flags =
 800				CRYPTO_ALG_ASYNC |
 801				CRYPTO_ALG_NEED_FALLBACK,
 802				.cra_blocksize = SHA1_BLOCK_SIZE,
 803				.cra_ctxsize = sizeof(struct img_hash_ctx),
 804				.cra_init = img_hash_cra_sha1_init,
 805				.cra_exit = img_hash_cra_exit,
 806				.cra_module = THIS_MODULE,
 807			}
 808		}
 809	},
 810	{
 811		.init = img_hash_init,
 812		.update = img_hash_update,
 813		.final = img_hash_final,
 814		.finup = img_hash_finup,
 815		.export = img_hash_export,
 816		.import = img_hash_import,
 817		.digest = img_hash_digest,
 818		.halg = {
 819			.digestsize = SHA224_DIGEST_SIZE,
 820			.statesize = sizeof(struct sha256_state),
 821			.base = {
 822				.cra_name = "sha224",
 823				.cra_driver_name = "img-sha224",
 824				.cra_priority = 300,
 825				.cra_flags =
 826				CRYPTO_ALG_ASYNC |
 827				CRYPTO_ALG_NEED_FALLBACK,
 828				.cra_blocksize = SHA224_BLOCK_SIZE,
 829				.cra_ctxsize = sizeof(struct img_hash_ctx),
 830				.cra_init = img_hash_cra_sha224_init,
 831				.cra_exit = img_hash_cra_exit,
 832				.cra_module = THIS_MODULE,
 833			}
 834		}
 835	},
 836	{
 837		.init = img_hash_init,
 838		.update = img_hash_update,
 839		.final = img_hash_final,
 840		.finup = img_hash_finup,
 841		.export = img_hash_export,
 842		.import = img_hash_import,
 843		.digest = img_hash_digest,
 844		.halg = {
 845			.digestsize = SHA256_DIGEST_SIZE,
 846			.statesize = sizeof(struct sha256_state),
 847			.base = {
 848				.cra_name = "sha256",
 849				.cra_driver_name = "img-sha256",
 850				.cra_priority = 300,
 851				.cra_flags =
 852				CRYPTO_ALG_ASYNC |
 853				CRYPTO_ALG_NEED_FALLBACK,
 854				.cra_blocksize = SHA256_BLOCK_SIZE,
 855				.cra_ctxsize = sizeof(struct img_hash_ctx),
 856				.cra_init = img_hash_cra_sha256_init,
 857				.cra_exit = img_hash_cra_exit,
 858				.cra_module = THIS_MODULE,
 859			}
 860		}
 861	}
 862};
 863
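/*
 * Register every transform in img_algs[]; on failure, unregister the ones
 * that were already registered and propagate the error.
 */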
 864static int img_register_algs(struct img_hash_dev *hdev)
 865{
 866	int i, err;
 867
 868	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
 869		err = crypto_register_ahash(&img_algs[i]);
 870		if (err)
 871			goto err_reg;
 872	}
 873	return 0;
 874
 875err_reg:
 876	for (; i--; )
 877		crypto_unregister_ahash(&img_algs[i]);
 878
 879	return err;
 880}
 881
 882static int img_unregister_algs(struct img_hash_dev *hdev)
 883{
 884	int i;
 885
 886	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
 887		crypto_unregister_ahash(&img_algs[i]);
 888	return 0;
 889}
 890
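/*
 * Bottom half scheduled from the interrupt handler. It either restarts the
 * request queue when the device is idle, or completes the current request
 * once the CPU or DMA transfer has produced its output, reporting any
 * pending error.
 */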
 891static void img_hash_done_task(unsigned long data)
 892{
 893	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
 894	int err = 0;
 895
 896	if (hdev->err == -EINVAL) {
 897		err = hdev->err;
 898		goto finish;
 899	}
 900
 901	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
 902		img_hash_handle_queue(hdev, NULL);
 903		return;
 904	}
 905
 906	if (DRIVER_FLAGS_CPU & hdev->flags) {
 907		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
 908			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
 909			goto finish;
 910		}
 911	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
 912		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
 913			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
 914			img_hash_write_via_dma_stop(hdev);
 915			if (hdev->err) {
 916				err = hdev->err;
 917				goto finish;
 918			}
 919		}
 920		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
 921			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
 922					DRIVER_FLAGS_OUTPUT_READY);
 923			goto finish;
 924		}
 925	}
 926	return;
 927
 928finish:
 929	img_hash_finish_req(hdev->req, err);
 930}
 931
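/*
 * Sketch of a matching device tree node (all values below are placeholders,
 * not taken from a real platform; see the img,hash-accelerator binding for
 * the authoritative form). The probe routine below expects two register
 * regions (control block, then write port), one interrupt, clocks named
 * "hash" and "sys", plus the DMA channel requested by img_hash_dma_init():
 *
 *	hash@0 {
 *		compatible = "img,hash-accelerator";
 *		reg = <0x0 0x100>, <0x0 0x4>;
 *		interrupts = <0>;
 *		clocks = <&clk 0>, <&clk 1>;
 *		clock-names = "sys", "hash";
 *	};
 */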
 932static const struct of_device_id img_hash_match[] = {
 933	{ .compatible = "img,hash-accelerator" },
 934	{}
 935};
 936MODULE_DEVICE_TABLE(of, img_hash_match);
 937
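/*
 * Probe: map the register bank and the write port, hook up the interrupt,
 * enable the "hash" and "sys" clocks, grab a DMA channel, add the device to
 * the global list and finally register the hash algorithms. The error labels
 * unwind whatever had been set up.
 */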
 938static int img_hash_probe(struct platform_device *pdev)
 939{
 940	struct img_hash_dev *hdev;
 941	struct device *dev = &pdev->dev;
 942	struct resource *hash_res;
 943	int	irq;
 944	int err;
 945
 946	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
 947	if (hdev == NULL)
 948		return -ENOMEM;
 949
 950	spin_lock_init(&hdev->lock);
 951
 952	hdev->dev = dev;
 953
 954	platform_set_drvdata(pdev, hdev);
 955
 956	INIT_LIST_HEAD(&hdev->list);
 957
 958	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
 959	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);
 960
 961	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);
 962
 963	/* Register bank */
 964	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 965
 966	hdev->io_base = devm_ioremap_resource(dev, hash_res);
 967	if (IS_ERR(hdev->io_base)) {
 968		err = PTR_ERR(hdev->io_base);
 969		dev_err(dev, "can't ioremap, returned %d\n", err);
 970
 971		goto res_err;
 972	}
 973
 974	/* Write port (DMA or CPU) */
 975	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 976	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
 977	if (IS_ERR(hdev->cpu_addr)) {
 978		dev_err(dev, "can't ioremap write port\n");
 979		err = PTR_ERR(hdev->cpu_addr);
 980		goto res_err;
 981	}
 982	hdev->bus_addr = hash_res->start;
 983
 984	irq = platform_get_irq(pdev, 0);
 985	if (irq < 0) {
 986		dev_err(dev, "no IRQ resource info\n");
 987		err = irq;
 988		goto res_err;
 989	}
 990
 991	err = devm_request_irq(dev, irq, img_irq_handler, 0,
 992			       dev_name(dev), hdev);
 993	if (err) {
 994		dev_err(dev, "unable to request irq\n");
 995		goto res_err;
 996	}
 997	dev_dbg(dev, "using IRQ channel %d\n", irq);
 998
 999	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
1000	if (IS_ERR(hdev->hash_clk)) {
 1001		dev_err(dev, "hash clock initialization failed\n");
1002		err = PTR_ERR(hdev->hash_clk);
1003		goto res_err;
1004	}
1005
1006	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
1007	if (IS_ERR(hdev->sys_clk)) {
 1008		dev_err(dev, "sys clock initialization failed\n");
1009		err = PTR_ERR(hdev->sys_clk);
1010		goto res_err;
1011	}
1012
1013	err = clk_prepare_enable(hdev->hash_clk);
1014	if (err)
1015		goto res_err;
1016
1017	err = clk_prepare_enable(hdev->sys_clk);
1018	if (err)
1019		goto clk_err;
1020
1021	err = img_hash_dma_init(hdev);
1022	if (err)
1023		goto dma_err;
1024
1025	dev_dbg(dev, "using %s for DMA transfers\n",
1026		dma_chan_name(hdev->dma_lch));
1027
1028	spin_lock(&img_hash.lock);
1029	list_add_tail(&hdev->list, &img_hash.dev_list);
1030	spin_unlock(&img_hash.lock);
1031
1032	err = img_register_algs(hdev);
1033	if (err)
1034		goto err_algs;
1035	dev_info(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");
1036
1037	return 0;
1038
1039err_algs:
1040	spin_lock(&img_hash.lock);
1041	list_del(&hdev->list);
1042	spin_unlock(&img_hash.lock);
1043	dma_release_channel(hdev->dma_lch);
1044dma_err:
1045	clk_disable_unprepare(hdev->sys_clk);
1046clk_err:
1047	clk_disable_unprepare(hdev->hash_clk);
1048res_err:
1049	tasklet_kill(&hdev->done_task);
1050	tasklet_kill(&hdev->dma_task);
1051
1052	return err;
1053}
1054
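/*
 * Remove: drop the device from the global list, unregister the algorithms,
 * kill the tasklets and release the DMA channel and clocks.
 */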
1055static int img_hash_remove(struct platform_device *pdev)
1056{
 1057	struct img_hash_dev *hdev;
1058
1059	hdev = platform_get_drvdata(pdev);
1060	spin_lock(&img_hash.lock);
1061	list_del(&hdev->list);
1062	spin_unlock(&img_hash.lock);
1063
1064	img_unregister_algs(hdev);
1065
1066	tasklet_kill(&hdev->done_task);
1067	tasklet_kill(&hdev->dma_task);
1068
1069	dma_release_channel(hdev->dma_lch);
1070
1071	clk_disable_unprepare(hdev->hash_clk);
1072	clk_disable_unprepare(hdev->sys_clk);
1073
1074	return 0;
1075}
1076
1077#ifdef CONFIG_PM_SLEEP
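/*
 * System sleep hooks: no register state is saved or restored; the clocks are
 * simply gated on suspend and re-enabled (with error unwinding) on resume.
 */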
1078static int img_hash_suspend(struct device *dev)
1079{
1080	struct img_hash_dev *hdev = dev_get_drvdata(dev);
1081
1082	clk_disable_unprepare(hdev->hash_clk);
1083	clk_disable_unprepare(hdev->sys_clk);
1084
1085	return 0;
1086}
1087
1088static int img_hash_resume(struct device *dev)
1089{
 1090	struct img_hash_dev *hdev = dev_get_drvdata(dev);
	int ret;
 1091
 1092	ret = clk_prepare_enable(hdev->hash_clk);
	if (ret)
		return ret;

 1093	ret = clk_prepare_enable(hdev->sys_clk);
	if (ret) {
		clk_disable_unprepare(hdev->hash_clk);
		return ret;
	}
 1094
 1095	return 0;
 1096}
1097#endif /* CONFIG_PM_SLEEP */
1098
1099static const struct dev_pm_ops img_hash_pm_ops = {
1100	SET_SYSTEM_SLEEP_PM_OPS(img_hash_suspend, img_hash_resume)
1101};
1102
1103static struct platform_driver img_hash_driver = {
1104	.probe		= img_hash_probe,
1105	.remove		= img_hash_remove,
1106	.driver		= {
1107		.name	= "img-hash-accelerator",
1108		.pm	= &img_hash_pm_ops,
1109		.of_match_table	= of_match_ptr(img_hash_match),
1110	}
1111};
1112module_platform_driver(img_hash_driver);
1113
1114MODULE_LICENSE("GPL v2");
1115MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
1116MODULE_AUTHOR("Will Thomas.");
1117MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");