Linux v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for OMAP SHA1/MD5 HW acceleration.
   6 *
   7 * Copyright (c) 2010 Nokia Corporation
   8 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
   9 * Copyright (c) 2011 Texas Instruments Incorporated
  10 *
  11 * Some ideas are from old omap-sha1-md5.c driver.
  12 */
  13
  14#define pr_fmt(fmt) "%s: " fmt, __func__
  15
  16#include <linux/err.h>
  17#include <linux/device.h>
  18#include <linux/module.h>
  19#include <linux/init.h>
  20#include <linux/errno.h>
  21#include <linux/interrupt.h>
  22#include <linux/kernel.h>
  23#include <linux/irq.h>
  24#include <linux/io.h>
  25#include <linux/platform_device.h>
  26#include <linux/scatterlist.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/dmaengine.h>
  29#include <linux/pm_runtime.h>
  30#include <linux/of.h>
  31#include <linux/of_device.h>
  32#include <linux/of_address.h>
  33#include <linux/of_irq.h>
  34#include <linux/delay.h>
  35#include <linux/crypto.h>
  36#include <crypto/scatterwalk.h>
  37#include <crypto/algapi.h>
  38#include <crypto/sha.h>
  39#include <crypto/hash.h>
  40#include <crypto/hmac.h>
  41#include <crypto/internal/hash.h>
  42
  43#define MD5_DIGEST_SIZE			16
  44
  45#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
  46#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
  47#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)
  48
   50#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + ((x) * 0x04))
  50
  51#define SHA_REG_CTRL			0x18
  52#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
  53#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
  54#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
  55#define SHA_REG_CTRL_ALGO		(1 << 2)
  56#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
  57#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)
  58
  59#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)
  60
  61#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
  62#define SHA_REG_MASK_DMA_EN		(1 << 3)
  63#define SHA_REG_MASK_IT_EN		(1 << 2)
  64#define SHA_REG_MASK_SOFTRESET		(1 << 1)
  65#define SHA_REG_AUTOIDLE		(1 << 0)
  66
  67#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
  68#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)
  69
  70#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
  71#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
  72#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
  73#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
  74#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)
  75
  76#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
  77#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
  78#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
  79#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
  80#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
  81#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
  82#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)
  83
  84#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)
  85
  86#define SHA_REG_IRQSTATUS		0x118
  87#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
  88#define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
  89#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
  90#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)
  91
  92#define SHA_REG_IRQENA			0x11C
  93#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
  94#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
  95#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
  96#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)
  97
  98#define DEFAULT_TIMEOUT_INTERVAL	HZ
  99
 100#define DEFAULT_AUTOSUSPEND_DELAY	1000
 101
 102/* mostly device flags */
 103#define FLAGS_BUSY		0
 104#define FLAGS_FINAL		1
 105#define FLAGS_DMA_ACTIVE	2
 106#define FLAGS_OUTPUT_READY	3
 107#define FLAGS_INIT		4
 108#define FLAGS_CPU		5
 109#define FLAGS_DMA_READY		6
 110#define FLAGS_AUTO_XOR		7
 111#define FLAGS_BE32_SHA1		8
 112#define FLAGS_SGS_COPIED	9
 113#define FLAGS_SGS_ALLOCED	10
 114#define FLAGS_HUGE		11
 115
 116/* context flags */
 117#define FLAGS_FINUP		16
 118
 119#define FLAGS_MODE_SHIFT	18
 120#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	<< FLAGS_MODE_SHIFT)
 121#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
 122#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
 123#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
 124#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
 125#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
 126#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
 127
 128#define FLAGS_HMAC		21
 129#define FLAGS_ERROR		22
 130
 131#define OP_UPDATE		1
 132#define OP_FINAL		2
 133
 134#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
 135#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
 136
 137#define BUFLEN			SHA512_BLOCK_SIZE
 138#define OMAP_SHA_DMA_THRESHOLD	256
 139
 140#define OMAP_SHA_MAX_DMA_LEN	(1024 * 2048)
 141
 142struct omap_sham_dev;
 143
 144struct omap_sham_reqctx {
 145	struct omap_sham_dev	*dd;
 146	unsigned long		flags;
 147	unsigned long		op;
 148
 149	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
 150	size_t			digcnt;
 151	size_t			bufcnt;
 152	size_t			buflen;
 153
 154	/* walk state */
 155	struct scatterlist	*sg;
 156	struct scatterlist	sgl[2];
 157	int			offset;	/* offset in current sg */
 158	int			sg_len;
 159	unsigned int		total;	/* total request */
 160
 161	u8			buffer[] OMAP_ALIGNED;
 162};
 163
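/*
 * Editor's note (illustrative, not part of the driver): device-wide
 * bits such as FLAGS_BUSY live in omap_sham_dev.flags and are accessed
 * atomically with set_bit()/test_bit(); per-request bits live in the
 * reqctx flags word above and are combined as plain BIT() masks, with
 * the algorithm encoded in the FLAGS_MODE field. A hypothetical
 * predicate over the context flags:
 */
static bool example_is_final_sha256(struct omap_sham_reqctx *ctx)
{
	return (ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA256 &&
	       (ctx->flags & BIT(FLAGS_FINUP));
}
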
 164struct omap_sham_hmac_ctx {
 165	struct crypto_shash	*shash;
 166	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
 167	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
 168};
 169
 170struct omap_sham_ctx {
 171	unsigned long		flags;
 172
 173	/* fallback stuff */
 174	struct crypto_shash	*fallback;
 175
 176	struct omap_sham_hmac_ctx base[];
 177};
 178
 179#define OMAP_SHAM_QUEUE_LENGTH	10
 180
 181struct omap_sham_algs_info {
 182	struct ahash_alg	*algs_list;
 183	unsigned int		size;
 184	unsigned int		registered;
 185};
 186
 187struct omap_sham_pdata {
 188	struct omap_sham_algs_info	*algs_info;
 189	unsigned int	algs_info_size;
 190	unsigned long	flags;
 191	int		digest_size;
 192
 193	void		(*copy_hash)(struct ahash_request *req, int out);
 194	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
 195				      int final, int dma);
 196	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
 197	int		(*poll_irq)(struct omap_sham_dev *dd);
 198	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);
 199
 200	u32		odigest_ofs;
 201	u32		idigest_ofs;
 202	u32		din_ofs;
 203	u32		digcnt_ofs;
 204	u32		rev_ofs;
 205	u32		mask_ofs;
 206	u32		sysstatus_ofs;
 207	u32		mode_ofs;
 208	u32		length_ofs;
 209
 210	u32		major_mask;
 211	u32		major_shift;
 212	u32		minor_mask;
 213	u32		minor_shift;
 214};
 215
 216struct omap_sham_dev {
 217	struct list_head	list;
 218	unsigned long		phys_base;
 219	struct device		*dev;
 220	void __iomem		*io_base;
 221	int			irq;
 222	spinlock_t		lock;
 223	int			err;
 224	struct dma_chan		*dma_lch;
 225	struct tasklet_struct	done_task;
 226	u8			polling_mode;
 227	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;
 228
 229	unsigned long		flags;
 230	int			fallback_sz;
 231	struct crypto_queue	queue;
 232	struct ahash_request	*req;
 233
 234	const struct omap_sham_pdata	*pdata;
 235};
 236
 237struct omap_sham_drv {
 238	struct list_head	dev_list;
 239	spinlock_t		lock;
 240	unsigned long		flags;
 241};
 242
 243static struct omap_sham_drv sham = {
 244	.dev_list = LIST_HEAD_INIT(sham.dev_list),
 245	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
 246};
 247
 248static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
 249{
 250	return __raw_readl(dd->io_base + offset);
 251}
 252
 253static inline void omap_sham_write(struct omap_sham_dev *dd,
 254					u32 offset, u32 value)
 255{
 256	__raw_writel(value, dd->io_base + offset);
 257}
 258
 259static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
 260					u32 value, u32 mask)
 261{
 262	u32 val;
 263
 264	val = omap_sham_read(dd, address);
 265	val &= ~mask;
 266	val |= value;
 267	omap_sham_write(dd, address, val);
 268}
 269
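/*
 * Editor's note (illustrative): register addresses are computed from
 * per-SoC offsets in dd->pdata, so the same accessors work on
 * OMAP2/4/5, and omap_sham_write_mask() performs a read-modify-write
 * that touches only the masked bits. A hypothetical caller:
 */
static void example_enable_irq_only(struct omap_sham_dev *dd)
{
	/* set IT_EN, clear DMA_EN, leave all other bits untouched */
	omap_sham_write_mask(dd, SHA_REG_MASK(dd), SHA_REG_MASK_IT_EN,
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}
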
 270static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
 271{
 272	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
 273
 274	while (!(omap_sham_read(dd, offset) & bit)) {
 275		if (time_is_before_jiffies(timeout))
 276			return -ETIMEDOUT;
 277	}
 278
 279	return 0;
 280}
 281
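/*
 * Editor's note: omap_sham_wait() busy-polls with a jiffies deadline of
 * DEFAULT_TIMEOUT_INTERVAL (HZ, roughly one second). It is only used on
 * the polled PIO path, via the poll_irq() hooks further down.
 */
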
 282static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
 283{
 284	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 285	struct omap_sham_dev *dd = ctx->dd;
 286	u32 *hash = (u32 *)ctx->digest;
 287	int i;
 288
 289	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
 290		if (out)
 291			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
 292		else
 293			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
 294	}
 295}
 296
 297static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
 298{
 299	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 300	struct omap_sham_dev *dd = ctx->dd;
 301	int i;
 302
 303	if (ctx->flags & BIT(FLAGS_HMAC)) {
 304		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
 305		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 306		struct omap_sham_hmac_ctx *bctx = tctx->base;
 307		u32 *opad = (u32 *)bctx->opad;
 308
 309		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
 310			if (out)
 311				opad[i] = omap_sham_read(dd,
 312						SHA_REG_ODIGEST(dd, i));
 313			else
 314				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
 315						opad[i]);
 316		}
 317	}
 318
 319	omap_sham_copy_hash_omap2(req, out);
 320}
 321
 322static void omap_sham_copy_ready_hash(struct ahash_request *req)
 323{
 324	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 325	u32 *in = (u32 *)ctx->digest;
 326	u32 *hash = (u32 *)req->result;
 327	int i, d, big_endian = 0;
 328
 329	if (!hash)
 330		return;
 331
 332	switch (ctx->flags & FLAGS_MODE_MASK) {
 333	case FLAGS_MODE_MD5:
 334		d = MD5_DIGEST_SIZE / sizeof(u32);
 335		break;
 336	case FLAGS_MODE_SHA1:
 337		/* OMAP2 SHA1 is big endian */
 338		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
 339			big_endian = 1;
 340		d = SHA1_DIGEST_SIZE / sizeof(u32);
 341		break;
 342	case FLAGS_MODE_SHA224:
 343		d = SHA224_DIGEST_SIZE / sizeof(u32);
 344		break;
 345	case FLAGS_MODE_SHA256:
 346		d = SHA256_DIGEST_SIZE / sizeof(u32);
 347		break;
 348	case FLAGS_MODE_SHA384:
 349		d = SHA384_DIGEST_SIZE / sizeof(u32);
 350		break;
 351	case FLAGS_MODE_SHA512:
 352		d = SHA512_DIGEST_SIZE / sizeof(u32);
 353		break;
 354	default:
 355		d = 0;
 356	}
 357
 358	if (big_endian)
 359		for (i = 0; i < d; i++)
 360			hash[i] = be32_to_cpup((__be32 *)in + i);
 361	else
 362		for (i = 0; i < d; i++)
 363			hash[i] = le32_to_cpup((__le32 *)in + i);
 364}
 365
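/*
 * Editor's note: the fixup above exists because most OMAP revisions
 * return the digest as little-endian 32-bit words, while the OMAP2 SHA1
 * core returns big-endian words (flagged via FLAGS_BE32_SHA1). Each
 * word is therefore converted with le32_to_cpup()/be32_to_cpup() before
 * being handed back in req->result.
 */
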
 366static int omap_sham_hw_init(struct omap_sham_dev *dd)
 367{
 368	int err;
 369
 370	err = pm_runtime_get_sync(dd->dev);
 371	if (err < 0) {
 372		dev_err(dd->dev, "failed to get sync: %d\n", err);
 373		return err;
 374	}
 375
 376	if (!test_bit(FLAGS_INIT, &dd->flags)) {
 377		set_bit(FLAGS_INIT, &dd->flags);
 378		dd->err = 0;
 379	}
 380
 381	return 0;
 382}
 383
 384static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
 385				 int final, int dma)
 386{
 387	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 388	u32 val = length << 5, mask;
 389
 390	if (likely(ctx->digcnt))
 391		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
 392
 393	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
 394		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
 395		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
 396	/*
 397	 * Setting ALGO_CONST only for the first iteration
 398	 * and CLOSE_HASH only for the last one.
 399	 */
 400	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
 401		val |= SHA_REG_CTRL_ALGO;
 402	if (!ctx->digcnt)
 403		val |= SHA_REG_CTRL_ALGO_CONST;
 404	if (final)
 405		val |= SHA_REG_CTRL_CLOSE_HASH;
 406
 407	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
 408			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 409
 410	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
 411}
 412
 413static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
 414{
 415}
 416
 417static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
 418{
 419	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
 420}
 421
 422static int get_block_size(struct omap_sham_reqctx *ctx)
 423{
 424	int d;
 425
 426	switch (ctx->flags & FLAGS_MODE_MASK) {
 427	case FLAGS_MODE_MD5:
 428	case FLAGS_MODE_SHA1:
 429		d = SHA1_BLOCK_SIZE;
 430		break;
 431	case FLAGS_MODE_SHA224:
 432	case FLAGS_MODE_SHA256:
 433		d = SHA256_BLOCK_SIZE;
 434		break;
 435	case FLAGS_MODE_SHA384:
 436	case FLAGS_MODE_SHA512:
 437		d = SHA512_BLOCK_SIZE;
 438		break;
 439	default:
 440		d = 0;
 441	}
 442
 443	return d;
 444}
 445
 446static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
 447				    u32 *value, int count)
 448{
 449	for (; count--; value++, offset += 4)
 450		omap_sham_write(dd, offset, *value);
 451}
 452
 453static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
 454				 int final, int dma)
 455{
 456	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 457	u32 val, mask;
 458
 459	/*
 460	 * Setting ALGO_CONST only for the first iteration and
 461	 * CLOSE_HASH only for the last one. Note that flags mode bits
 462	 * correspond to algorithm encoding in mode register.
 463	 */
 464	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
 465	if (!ctx->digcnt) {
 466		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
 467		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 468		struct omap_sham_hmac_ctx *bctx = tctx->base;
 469		int bs, nr_dr;
 470
 471		val |= SHA_REG_MODE_ALGO_CONSTANT;
 472
 473		if (ctx->flags & BIT(FLAGS_HMAC)) {
 474			bs = get_block_size(ctx);
 475			nr_dr = bs / (2 * sizeof(u32));
 476			val |= SHA_REG_MODE_HMAC_KEY_PROC;
 477			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
 478					  (u32 *)bctx->ipad, nr_dr);
 479			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
 480					  (u32 *)bctx->ipad + nr_dr, nr_dr);
 481			ctx->digcnt += bs;
 482		}
 483	}
 484
 485	if (final) {
 486		val |= SHA_REG_MODE_CLOSE_HASH;
 487
 488		if (ctx->flags & BIT(FLAGS_HMAC))
 489			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
 490	}
 491
 492	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
 493	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
 494	       SHA_REG_MODE_HMAC_KEY_PROC;
 495
 496	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
 497	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
 498	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
 499	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
 500			     SHA_REG_MASK_IT_EN |
 501				     (dma ? SHA_REG_MASK_DMA_EN : 0),
 502			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
 503}
 504
 505static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
 506{
 507	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
 508}
 509
 510static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
 511{
 512	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
 513			      SHA_REG_IRQSTATUS_INPUT_RDY);
 514}
 515
 516static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
 517			      int final)
 518{
 519	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 520	int count, len32, bs32, offset = 0;
 521	const u32 *buffer;
 522	int mlen;
 523	struct sg_mapping_iter mi;
 524
 525	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n",
 526						ctx->digcnt, length, final);
 527
 528	dd->pdata->write_ctrl(dd, length, final, 0);
 529	dd->pdata->trigger(dd, length);
 530
  531	/* digcnt must be non-zero before the next lines so clocks can be disabled later */
 532	ctx->digcnt += length;
 533	ctx->total -= length;
 534
 535	if (final)
 536		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 537
 538	set_bit(FLAGS_CPU, &dd->flags);
 539
 540	len32 = DIV_ROUND_UP(length, sizeof(u32));
 541	bs32 = get_block_size(ctx) / sizeof(u32);
 542
 543	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
 544		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
 545
 546	mlen = 0;
 547
 548	while (len32) {
 549		if (dd->pdata->poll_irq(dd))
 550			return -ETIMEDOUT;
 551
 552		for (count = 0; count < min(len32, bs32); count++, offset++) {
 553			if (!mlen) {
 554				sg_miter_next(&mi);
 555				mlen = mi.length;
 556				if (!mlen) {
 557					pr_err("sg miter failure.\n");
 558					return -EINVAL;
 559				}
 560				offset = 0;
 561				buffer = mi.addr;
 562			}
 563			omap_sham_write(dd, SHA_REG_DIN(dd, count),
 564					buffer[offset]);
 565			mlen -= 4;
 566		}
 567		len32 -= min(len32, bs32);
 568	}
 569
 570	sg_miter_stop(&mi);
 571
 572	return -EINPROGRESS;
 573}
 574
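/*
 * Editor's note: the PIO loop above walks the scatterlist with the
 * sg_mapping_iter API instead of flattening it first. A self-contained
 * sketch of the same pattern (hypothetical helper, not used by the
 * driver):
 */
static size_t example_count_sg_bytes(struct scatterlist *sg, int nents)
{
	struct sg_mapping_iter mi;
	size_t total = 0;

	sg_miter_start(&mi, sg, nents, SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	while (sg_miter_next(&mi))	/* maps one element at a time */
		total += mi.length;
	sg_miter_stop(&mi);		/* must pair with sg_miter_start() */

	return total;
}
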
 575static void omap_sham_dma_callback(void *param)
 576{
 577	struct omap_sham_dev *dd = param;
 578
 579	set_bit(FLAGS_DMA_READY, &dd->flags);
 580	tasklet_schedule(&dd->done_task);
 581}
 582
 583static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
 584			      int final)
 585{
 586	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 587	struct dma_async_tx_descriptor *tx;
 588	struct dma_slave_config cfg;
 589	int ret;
 590
 591	dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n",
 592						ctx->digcnt, length, final);
 593
 594	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
 595		dev_err(dd->dev, "dma_map_sg error\n");
 596		return -EINVAL;
 597	}
 598
 599	memset(&cfg, 0, sizeof(cfg));
 600
 601	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
 602	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 603	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
 604
 605	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
 606	if (ret) {
 607		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
 608		return ret;
 609	}
 610
 611	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
 612				     DMA_MEM_TO_DEV,
 613				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 614
 615	if (!tx) {
 616		dev_err(dd->dev, "prep_slave_sg failed\n");
 617		return -EINVAL;
 618	}
 619
 620	tx->callback = omap_sham_dma_callback;
 621	tx->callback_param = dd;
 622
 623	dd->pdata->write_ctrl(dd, length, final, 1);
 624
 625	ctx->digcnt += length;
 626	ctx->total -= length;
 627
 628	if (final)
 629		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 630
 631	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 632
 633	dmaengine_submit(tx);
 634	dma_async_issue_pending(dd->dma_lch);
 635
 636	dd->pdata->trigger(dd, length);
 637
 638	return -EINPROGRESS;
 639}
 640
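/*
 * Editor's note: the function above follows the canonical dmaengine
 * slave-TX sequence: dma_map_sg() -> dmaengine_slave_config() ->
 * dmaengine_prep_slave_sg() -> install the completion callback ->
 * dmaengine_submit() -> dma_async_issue_pending(). The matching
 * dma_unmap_sg() happens in omap_sham_update_dma_stop() once the
 * callback has scheduled the done tasklet.
 */
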
 641static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
 642				   struct scatterlist *sg, int bs, int new_len)
 643{
 644	int n = sg_nents(sg);
 645	struct scatterlist *tmp;
 646	int offset = ctx->offset;
 647
 648	ctx->total = new_len;
 649
 650	if (ctx->bufcnt)
 651		n++;
 652
 653	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
 654	if (!ctx->sg)
 655		return -ENOMEM;
 656
 657	sg_init_table(ctx->sg, n);
 658
 659	tmp = ctx->sg;
 660
 661	ctx->sg_len = 0;
 662
 663	if (ctx->bufcnt) {
 664		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
 665		tmp = sg_next(tmp);
 666		ctx->sg_len++;
 667		new_len -= ctx->bufcnt;
 668	}
 669
 670	while (sg && new_len) {
 671		int len = sg->length - offset;
 672
 673		if (len <= 0) {
 674			offset -= sg->length;
 675			sg = sg_next(sg);
 676			continue;
 677		}
 678
 679		if (new_len < len)
 680			len = new_len;
 681
 682		if (len > 0) {
 683			new_len -= len;
 684			sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
 685			offset = 0;
 686			ctx->offset = 0;
 687			ctx->sg_len++;
 688			if (new_len <= 0)
 689				break;
 690			tmp = sg_next(tmp);
 691		}
 692
 693		sg = sg_next(sg);
 694	}
 695
 696	if (tmp)
 697		sg_mark_end(tmp);
 698
 699	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
 700
 701	ctx->offset += new_len - ctx->bufcnt;
 702	ctx->bufcnt = 0;
 703
 704	return 0;
 705}
 706
 707static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
 708			      struct scatterlist *sg, int bs,
 709			      unsigned int new_len)
 710{
 711	int pages;
 712	void *buf;
 713
 714	pages = get_order(new_len);
 715
 716	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
 717	if (!buf) {
 718		pr_err("Couldn't allocate pages for unaligned cases.\n");
 719		return -ENOMEM;
 720	}
 721
 722	if (ctx->bufcnt)
 723		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
 724
 725	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
 726				 min(new_len, ctx->total) - ctx->bufcnt, 0);
 727	sg_init_table(ctx->sgl, 1);
 728	sg_set_buf(ctx->sgl, buf, new_len);
 729	ctx->sg = ctx->sgl;
 730	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
 731	ctx->sg_len = 1;
 732	ctx->offset += new_len - ctx->bufcnt;
 733	ctx->bufcnt = 0;
 734	ctx->total = new_len;
 735
 736	return 0;
 737}
 738
 739static int omap_sham_align_sgs(struct scatterlist *sg,
 740			       int nbytes, int bs, bool final,
 741			       struct omap_sham_reqctx *rctx)
 742{
 743	int n = 0;
 744	bool aligned = true;
 745	bool list_ok = true;
 746	struct scatterlist *sg_tmp = sg;
 747	int new_len;
 748	int offset = rctx->offset;
 749	int bufcnt = rctx->bufcnt;
 750
 751	if (!sg || !sg->length || !nbytes) {
 752		if (bufcnt) {
 753			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
 754			sg_init_table(rctx->sgl, 1);
 755			sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
 756			rctx->sg = rctx->sgl;
 757			rctx->sg_len = 1;
 758		}
 759
 760		return 0;
 761	}
 762
 763	new_len = nbytes;
 764
 765	if (offset)
 766		list_ok = false;
 767
 768	if (final)
 769		new_len = DIV_ROUND_UP(new_len, bs) * bs;
 770	else
 771		new_len = (new_len - 1) / bs * bs;
 772
 773	if (!new_len)
 774		return 0;
 775
 776	if (nbytes != new_len)
 777		list_ok = false;
 778
 779	while (nbytes > 0 && sg_tmp) {
 780		n++;
 781
 782		if (bufcnt) {
 783			if (!IS_ALIGNED(bufcnt, bs)) {
 784				aligned = false;
 785				break;
 786			}
 787			nbytes -= bufcnt;
 788			bufcnt = 0;
 789			if (!nbytes)
 790				list_ok = false;
 791
 792			continue;
 793		}
 794
 795#ifdef CONFIG_ZONE_DMA
 796		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
 797			aligned = false;
 798			break;
 799		}
 800#endif
 801
 802		if (offset < sg_tmp->length) {
 803			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
 804				aligned = false;
 805				break;
 806			}
 807
 808			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
 809				aligned = false;
 810				break;
 811			}
 812		}
 813
 814		if (offset) {
 815			offset -= sg_tmp->length;
 816			if (offset < 0) {
 817				nbytes += offset;
 818				offset = 0;
 819			}
 820		} else {
 821			nbytes -= sg_tmp->length;
 822		}
 823
 824		sg_tmp = sg_next(sg_tmp);
 825
 826		if (nbytes < 0) {
 827			list_ok = false;
 828			break;
 829		}
 830	}
 831
 832	if (new_len > OMAP_SHA_MAX_DMA_LEN) {
 833		new_len = OMAP_SHA_MAX_DMA_LEN;
 834		aligned = false;
 835	}
 836
 837	if (!aligned)
 838		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
 839	else if (!list_ok)
 840		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
 841
 842	rctx->total = new_len;
 843	rctx->offset += new_len;
 844	rctx->sg_len = n;
 845	if (rctx->bufcnt) {
 846		sg_init_table(rctx->sgl, 2);
 847		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
 848		sg_chain(rctx->sgl, 2, sg);
 849		rctx->sg = rctx->sgl;
 850	} else {
 851		rctx->sg = sg;
 852	}
 853
 854	return 0;
 855}
 856
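/*
 * Editor's note: to DMA directly from the caller's scatterlist, the
 * walk above requires every element to start 32-bit aligned and to
 * cover a multiple of the block size. If that fails, the data is
 * linearized into freshly allocated pages (omap_sham_copy_sgs()); if
 * only the list layout is unsuitable, it is re-described in a newly
 * built list (omap_sham_copy_sg_lists()).
 */
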
 857static int omap_sham_prepare_request(struct ahash_request *req, bool update)
 858{
 859	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
 860	int bs;
 861	int ret;
 862	unsigned int nbytes;
 863	bool final = rctx->flags & BIT(FLAGS_FINUP);
 864	int hash_later;
 865
 866	bs = get_block_size(rctx);
 867
 868	nbytes = rctx->bufcnt;
 869
 870	if (update)
 871		nbytes += req->nbytes - rctx->offset;
 872
 873	dev_dbg(rctx->dd->dev,
 874		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n",
 875		__func__, nbytes, bs, rctx->total, rctx->offset,
 876		rctx->bufcnt);
 877
 878	if (!nbytes)
 879		return 0;
 880
 881	rctx->total = nbytes;
 882
 883	if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
 884		int len = bs - rctx->bufcnt % bs;
 885
 886		if (len > req->nbytes)
 887			len = req->nbytes;
 888		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
 889					 0, len, 0);
 890		rctx->bufcnt += len;
 891		rctx->offset = len;
 892	}
 893
 894	if (rctx->bufcnt)
 895		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
 896
 897	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
 898	if (ret)
 899		return ret;
 900
 901	hash_later = nbytes - rctx->total;
 902	if (hash_later < 0)
 903		hash_later = 0;
 904
 905	if (hash_later && hash_later <= rctx->buflen) {
 906		scatterwalk_map_and_copy(rctx->buffer,
 907					 req->src,
 908					 req->nbytes - hash_later,
 909					 hash_later, 0);
 910
 911		rctx->bufcnt = hash_later;
 912	} else {
 913		rctx->bufcnt = 0;
 914	}
 915
 916	if (hash_later > rctx->buflen)
 917		set_bit(FLAGS_HUGE, &rctx->dd->flags);
 918
 919	rctx->total = min(nbytes, rctx->total);
 920
 921	return 0;
 922}
 923
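/*
 * Editor's note: after this point rctx->total holds the block-rounded
 * byte count the hardware will consume now, while the remainder
 * ("hash_later") has been stashed in rctx->buffer so that the next
 * update() or final() can prepend it.
 */
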
 924static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 925{
 926	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 927
 928	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
 929
 930	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 931
 932	return 0;
 933}
 934
 935static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
 936{
 937	struct omap_sham_dev *dd;
 938
 939	if (ctx->dd)
 940		return ctx->dd;
 941
 942	spin_lock_bh(&sham.lock);
 943	dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
 944	list_move_tail(&dd->list, &sham.dev_list);
 945	ctx->dd = dd;
 946	spin_unlock_bh(&sham.lock);
 947
 948	return dd;
 949}
 950
 951static int omap_sham_init(struct ahash_request *req)
 952{
 953	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 954	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 955	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 956	struct omap_sham_dev *dd;
 957	int bs = 0;
 958
 959	ctx->dd = NULL;
 960
 961	dd = omap_sham_find_dev(ctx);
 962	if (!dd)
 963		return -ENODEV;
 964
 965	ctx->flags = 0;
 966
 967	dev_dbg(dd->dev, "init: digest size: %d\n",
 968		crypto_ahash_digestsize(tfm));
 969
 970	switch (crypto_ahash_digestsize(tfm)) {
 971	case MD5_DIGEST_SIZE:
 972		ctx->flags |= FLAGS_MODE_MD5;
 973		bs = SHA1_BLOCK_SIZE;
 974		break;
 975	case SHA1_DIGEST_SIZE:
 976		ctx->flags |= FLAGS_MODE_SHA1;
 977		bs = SHA1_BLOCK_SIZE;
 978		break;
 979	case SHA224_DIGEST_SIZE:
 980		ctx->flags |= FLAGS_MODE_SHA224;
 981		bs = SHA224_BLOCK_SIZE;
 982		break;
 983	case SHA256_DIGEST_SIZE:
 984		ctx->flags |= FLAGS_MODE_SHA256;
 985		bs = SHA256_BLOCK_SIZE;
 986		break;
 987	case SHA384_DIGEST_SIZE:
 988		ctx->flags |= FLAGS_MODE_SHA384;
 989		bs = SHA384_BLOCK_SIZE;
 990		break;
 991	case SHA512_DIGEST_SIZE:
 992		ctx->flags |= FLAGS_MODE_SHA512;
 993		bs = SHA512_BLOCK_SIZE;
 994		break;
 995	}
 996
 997	ctx->bufcnt = 0;
 998	ctx->digcnt = 0;
 999	ctx->total = 0;
1000	ctx->offset = 0;
1001	ctx->buflen = BUFLEN;
1002
1003	if (tctx->flags & BIT(FLAGS_HMAC)) {
1004		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
1005			struct omap_sham_hmac_ctx *bctx = tctx->base;
1006
1007			memcpy(ctx->buffer, bctx->ipad, bs);
1008			ctx->bufcnt = bs;
1009		}
1010
1011		ctx->flags |= BIT(FLAGS_HMAC);
1012	}
1013
1014	return 0;
1015
1016}
1017
1018static int omap_sham_update_req(struct omap_sham_dev *dd)
1019{
1020	struct ahash_request *req = dd->req;
1021	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1022	int err;
1023	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1024			!(dd->flags & BIT(FLAGS_HUGE));
1025
1026	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
1027		ctx->total, ctx->digcnt, final);
1028
1029	if (ctx->total < get_block_size(ctx) ||
1030	    ctx->total < dd->fallback_sz)
1031		ctx->flags |= BIT(FLAGS_CPU);
1032
1033	if (ctx->flags & BIT(FLAGS_CPU))
1034		err = omap_sham_xmit_cpu(dd, ctx->total, final);
1035	else
1036		err = omap_sham_xmit_dma(dd, ctx->total, final);
1037
 1038	/* wait for dma completion before we can take more data */
1039	dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);
1040
1041	return err;
1042}
1043
1044static int omap_sham_final_req(struct omap_sham_dev *dd)
1045{
1046	struct ahash_request *req = dd->req;
1047	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1048	int err = 0, use_dma = 1;
1049
1050	if (dd->flags & BIT(FLAGS_HUGE))
1051		return 0;
1052
1053	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
1054		/*
 1055		 * it is faster to handle the last block with the CPU, and
 1056		 * the CPU must be used anyway when DMA is not available.
1057		 */
1058		use_dma = 0;
1059
1060	if (use_dma)
1061		err = omap_sham_xmit_dma(dd, ctx->total, 1);
1062	else
1063		err = omap_sham_xmit_cpu(dd, ctx->total, 1);
1064
1065	ctx->bufcnt = 0;
1066
1067	dev_dbg(dd->dev, "final_req: err: %d\n", err);
1068
1069	return err;
1070}
1071
1072static int omap_sham_finish_hmac(struct ahash_request *req)
1073{
1074	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1075	struct omap_sham_hmac_ctx *bctx = tctx->base;
1076	int bs = crypto_shash_blocksize(bctx->shash);
1077	int ds = crypto_shash_digestsize(bctx->shash);
1078	SHASH_DESC_ON_STACK(shash, bctx->shash);
1079
1080	shash->tfm = bctx->shash;
1081
1082	return crypto_shash_init(shash) ?:
1083	       crypto_shash_update(shash, bctx->opad, bs) ?:
1084	       crypto_shash_finup(shash, req->result, ds, req->result);
1085}
1086
1087static int omap_sham_finish(struct ahash_request *req)
1088{
1089	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1090	struct omap_sham_dev *dd = ctx->dd;
1091	int err = 0;
1092
1093	if (ctx->digcnt) {
1094		omap_sham_copy_ready_hash(req);
1095		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
1096				!test_bit(FLAGS_AUTO_XOR, &dd->flags))
1097			err = omap_sham_finish_hmac(req);
1098	}
1099
1100	dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt);
1101
1102	return err;
1103}
1104
1105static void omap_sham_finish_req(struct ahash_request *req, int err)
1106{
1107	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1108	struct omap_sham_dev *dd = ctx->dd;
1109
1110	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
1111		free_pages((unsigned long)sg_virt(ctx->sg),
1112			   get_order(ctx->sg->length));
1113
1114	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
1115		kfree(ctx->sg);
1116
1117	ctx->sg = NULL;
1118
1119	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));
1120
1121	if (dd->flags & BIT(FLAGS_HUGE)) {
1122		dd->flags &= ~(BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
1123				BIT(FLAGS_OUTPUT_READY) | BIT(FLAGS_HUGE));
1124		omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
1125		if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
1126			err = omap_sham_update_req(dd);
1127			if (err != -EINPROGRESS &&
1128			    (ctx->flags & BIT(FLAGS_FINUP)))
1129				err = omap_sham_final_req(dd);
1130		} else if (ctx->op == OP_FINAL) {
1131			omap_sham_final_req(dd);
1132		}
1133		return;
1134	}
1135
1136	if (!err) {
1137		dd->pdata->copy_hash(req, 1);
1138		if (test_bit(FLAGS_FINAL, &dd->flags))
1139			err = omap_sham_finish(req);
1140	} else {
1141		ctx->flags |= BIT(FLAGS_ERROR);
1142	}
1143
1144	/* atomic operation is not needed here */
1145	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
1146			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
1147
1148	pm_runtime_mark_last_busy(dd->dev);
1149	pm_runtime_put_autosuspend(dd->dev);
1150
1151	ctx->offset = 0;
1152
1153	if (req->base.complete)
1154		req->base.complete(&req->base, err);
1155}
1156
1157static int omap_sham_handle_queue(struct omap_sham_dev *dd,
1158				  struct ahash_request *req)
1159{
1160	struct crypto_async_request *async_req, *backlog;
1161	struct omap_sham_reqctx *ctx;
1162	unsigned long flags;
1163	int err = 0, ret = 0;
1164
1165retry:
1166	spin_lock_irqsave(&dd->lock, flags);
1167	if (req)
1168		ret = ahash_enqueue_request(&dd->queue, req);
1169	if (test_bit(FLAGS_BUSY, &dd->flags)) {
1170		spin_unlock_irqrestore(&dd->lock, flags);
1171		return ret;
1172	}
1173	backlog = crypto_get_backlog(&dd->queue);
1174	async_req = crypto_dequeue_request(&dd->queue);
1175	if (async_req)
1176		set_bit(FLAGS_BUSY, &dd->flags);
1177	spin_unlock_irqrestore(&dd->lock, flags);
1178
1179	if (!async_req)
1180		return ret;
1181
1182	if (backlog)
1183		backlog->complete(backlog, -EINPROGRESS);
1184
1185	req = ahash_request_cast(async_req);
1186	dd->req = req;
1187	ctx = ahash_request_ctx(req);
1188
1189	err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
1190	if (err || !ctx->total)
1191		goto err1;
1192
1193	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
1194						ctx->op, req->nbytes);
1195
1196	err = omap_sham_hw_init(dd);
1197	if (err)
1198		goto err1;
1199
1200	if (ctx->digcnt)
1201		/* request has changed - restore hash */
1202		dd->pdata->copy_hash(req, 0);
1203
1204	if (ctx->op == OP_UPDATE || (dd->flags & BIT(FLAGS_HUGE))) {
1205		err = omap_sham_update_req(dd);
1206		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
1207			/* no final() after finup() */
1208			err = omap_sham_final_req(dd);
1209	} else if (ctx->op == OP_FINAL) {
1210		err = omap_sham_final_req(dd);
1211	}
1212err1:
1213	dev_dbg(dd->dev, "exit, err: %d\n", err);
1214
1215	if (err != -EINPROGRESS) {
1216		/* done_task will not finish it, so do it here */
1217		omap_sham_finish_req(req, err);
1218		req = NULL;
1219
1220		/*
1221		 * Execute next request immediately if there is anything
 1222		 * in the queue.
1223		 */
1224		goto retry;
1225	}
1226
1227	return ret;
1228}
1229
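/*
 * Editor's note: this is the standard crypto_queue pattern -
 * ahash_enqueue_request() typically returns -EINPROGRESS (or -EBUSY if
 * the request was backlogged), a backlogged request is notified with
 * -EINPROGRESS before the next one starts, and requests that complete
 * synchronously are finished here because the done tasklet will never
 * see them.
 */
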
1230static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1231{
1232	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1233	struct omap_sham_dev *dd = ctx->dd;
1234
1235	ctx->op = op;
1236
1237	return omap_sham_handle_queue(dd, req);
1238}
1239
1240static int omap_sham_update(struct ahash_request *req)
1241{
1242	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1243	struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
1244
1245	if (!req->nbytes)
1246		return 0;
1247
1248	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
1249		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1250					 0, req->nbytes, 0);
1251		ctx->bufcnt += req->nbytes;
1252		return 0;
1253	}
1254
1255	if (dd->polling_mode)
1256		ctx->flags |= BIT(FLAGS_CPU);
1257
1258	return omap_sham_enqueue(req, OP_UPDATE);
1259}
1260
1261static int omap_sham_final_shash(struct ahash_request *req)
1262{
1263	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1264	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1265	int offset = 0;
1266
1267	/*
 1268	 * If we are running HMAC on hardware with limited support
 1269	 * (no AUTO_XOR), skip the ipad at the beginning of the buffer
 1270	 * when using the software fallback algorithm.
1271	 */
1272	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
1273	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
1274		offset = get_block_size(ctx);
1275
1276	return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
1277				       ctx->bufcnt - offset, req->result);
1278}
1279
1280static int omap_sham_final(struct ahash_request *req)
1281{
1282	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1283
1284	ctx->flags |= BIT(FLAGS_FINUP);
1285
1286	if (ctx->flags & BIT(FLAGS_ERROR))
1287		return 0; /* uncompleted hash is not needed */
1288
1289	/*
 1290	 * OMAP HW accel works only with buffers >= 9 bytes.
 1291	 * HMAC is always >= 9 because the ipad is a full block.
 1292	 * If the buffer size is less than fallback_sz, we use the
 1293	 * software fallback instead, as DMA + HW gives no benefit
 1294	 * for such small inputs.
1295	 */
1296	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
1297		return omap_sham_final_shash(req);
1298	else if (ctx->bufcnt)
1299		return omap_sham_enqueue(req, OP_FINAL);
1300
1301	/* copy ready hash (+ finalize hmac) */
1302	return omap_sham_finish(req);
1303}
1304
1305static int omap_sham_finup(struct ahash_request *req)
1306{
1307	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1308	int err1, err2;
1309
1310	ctx->flags |= BIT(FLAGS_FINUP);
1311
1312	err1 = omap_sham_update(req);
1313	if (err1 == -EINPROGRESS || err1 == -EBUSY)
1314		return err1;
1315	/*
 1316	 * final() always has to be called to clean up resources,
 1317	 * even if update() failed, except when it returned -EINPROGRESS
1318	 */
1319	err2 = omap_sham_final(req);
1320
1321	return err1 ?: err2;
1322}
1323
1324static int omap_sham_digest(struct ahash_request *req)
1325{
1326	return omap_sham_init(req) ?: omap_sham_finup(req);
1327}
1328
1329static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1330		      unsigned int keylen)
1331{
1332	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1333	struct omap_sham_hmac_ctx *bctx = tctx->base;
1334	int bs = crypto_shash_blocksize(bctx->shash);
1335	int ds = crypto_shash_digestsize(bctx->shash);
1336	int err, i;
1337
1338	err = crypto_shash_setkey(tctx->fallback, key, keylen);
1339	if (err)
1340		return err;
1341
1342	if (keylen > bs) {
1343		err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
1344					      bctx->ipad);
1345		if (err)
1346			return err;
1347		keylen = ds;
1348	} else {
1349		memcpy(bctx->ipad, key, keylen);
1350	}
1351
1352	memset(bctx->ipad + keylen, 0, bs - keylen);
1353
1354	if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
1355		memcpy(bctx->opad, bctx->ipad, bs);
1356
1357		for (i = 0; i < bs; i++) {
1358			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
1359			bctx->opad[i] ^= HMAC_OPAD_VALUE;
1360		}
1361	}
1362
1363	return err;
1364}
1365
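/*
 * Editor's note: classic RFC 2104 key preparation - a key longer than
 * the block size is first hashed down to digest size, then zero-padded
 * to a full block. On hardware without the AUTO_XOR feature, the
 * ipad/opad XOR with 0x36/0x5c is also precomputed here in software.
 */
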
1366static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1367{
1368	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1369	const char *alg_name = crypto_tfm_alg_name(tfm);
1370
1371	/* Allocate a fallback and abort if it failed. */
1372	tctx->fallback = crypto_alloc_shash(alg_name, 0,
1373					    CRYPTO_ALG_NEED_FALLBACK);
1374	if (IS_ERR(tctx->fallback)) {
1375		pr_err("omap-sham: fallback driver '%s' "
1376				"could not be loaded.\n", alg_name);
1377		return PTR_ERR(tctx->fallback);
1378	}
1379
1380	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1381				 sizeof(struct omap_sham_reqctx) + BUFLEN);
1382
1383	if (alg_base) {
1384		struct omap_sham_hmac_ctx *bctx = tctx->base;
1385		tctx->flags |= BIT(FLAGS_HMAC);
1386		bctx->shash = crypto_alloc_shash(alg_base, 0,
1387						CRYPTO_ALG_NEED_FALLBACK);
1388		if (IS_ERR(bctx->shash)) {
1389			pr_err("omap-sham: base driver '%s' "
1390					"could not be loaded.\n", alg_base);
1391			crypto_free_shash(tctx->fallback);
1392			return PTR_ERR(bctx->shash);
1393		}
1394
1395	}
1396
1397	return 0;
1398}
1399
1400static int omap_sham_cra_init(struct crypto_tfm *tfm)
1401{
1402	return omap_sham_cra_init_alg(tfm, NULL);
1403}
1404
1405static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1406{
1407	return omap_sham_cra_init_alg(tfm, "sha1");
1408}
1409
1410static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1411{
1412	return omap_sham_cra_init_alg(tfm, "sha224");
1413}
1414
1415static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1416{
1417	return omap_sham_cra_init_alg(tfm, "sha256");
1418}
1419
1420static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1421{
1422	return omap_sham_cra_init_alg(tfm, "md5");
1423}
1424
1425static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1426{
1427	return omap_sham_cra_init_alg(tfm, "sha384");
1428}
1429
1430static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1431{
1432	return omap_sham_cra_init_alg(tfm, "sha512");
1433}
1434
1435static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1436{
1437	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1438
1439	crypto_free_shash(tctx->fallback);
1440	tctx->fallback = NULL;
1441
1442	if (tctx->flags & BIT(FLAGS_HMAC)) {
1443		struct omap_sham_hmac_ctx *bctx = tctx->base;
1444		crypto_free_shash(bctx->shash);
1445	}
1446}
1447
1448static int omap_sham_export(struct ahash_request *req, void *out)
1449{
1450	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1451
1452	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
1453
1454	return 0;
1455}
1456
1457static int omap_sham_import(struct ahash_request *req, const void *in)
1458{
1459	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1460	const struct omap_sham_reqctx *ctx_in = in;
1461
1462	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
1463
1464	return 0;
1465}
1466
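/*
 * Editor's note (illustrative): a hypothetical ahash client using the
 * export/import hooks above to checkpoint a partial hash and resume it
 * later. The state buffer must hold at least crypto_ahash_statesize()
 * bytes; error handling is minimal for brevity.
 */
static int example_checkpoint_and_resume(struct ahash_request *req,
					 void *state)
{
	int ret;

	ret = crypto_ahash_export(req, state);	/* snapshot partial state */
	if (ret)
		return ret;

	return crypto_ahash_import(req, state);	/* restore and continue */
}
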
1467static struct ahash_alg algs_sha1_md5[] = {
1468{
1469	.init		= omap_sham_init,
1470	.update		= omap_sham_update,
1471	.final		= omap_sham_final,
1472	.finup		= omap_sham_finup,
1473	.digest		= omap_sham_digest,
1474	.halg.digestsize	= SHA1_DIGEST_SIZE,
1475	.halg.base	= {
1476		.cra_name		= "sha1",
1477		.cra_driver_name	= "omap-sha1",
1478		.cra_priority		= 400,
1479		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1480						CRYPTO_ALG_ASYNC |
1481						CRYPTO_ALG_NEED_FALLBACK,
1482		.cra_blocksize		= SHA1_BLOCK_SIZE,
1483		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1484		.cra_alignmask		= OMAP_ALIGN_MASK,
1485		.cra_module		= THIS_MODULE,
1486		.cra_init		= omap_sham_cra_init,
1487		.cra_exit		= omap_sham_cra_exit,
1488	}
1489},
1490{
1491	.init		= omap_sham_init,
1492	.update		= omap_sham_update,
1493	.final		= omap_sham_final,
1494	.finup		= omap_sham_finup,
1495	.digest		= omap_sham_digest,
1496	.halg.digestsize	= MD5_DIGEST_SIZE,
1497	.halg.base	= {
1498		.cra_name		= "md5",
1499		.cra_driver_name	= "omap-md5",
1500		.cra_priority		= 400,
1501		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1502						CRYPTO_ALG_ASYNC |
1503						CRYPTO_ALG_NEED_FALLBACK,
1504		.cra_blocksize		= SHA1_BLOCK_SIZE,
1505		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1506		.cra_alignmask		= OMAP_ALIGN_MASK,
1507		.cra_module		= THIS_MODULE,
1508		.cra_init		= omap_sham_cra_init,
1509		.cra_exit		= omap_sham_cra_exit,
1510	}
1511},
1512{
1513	.init		= omap_sham_init,
1514	.update		= omap_sham_update,
1515	.final		= omap_sham_final,
1516	.finup		= omap_sham_finup,
1517	.digest		= omap_sham_digest,
1518	.setkey		= omap_sham_setkey,
1519	.halg.digestsize	= SHA1_DIGEST_SIZE,
1520	.halg.base	= {
1521		.cra_name		= "hmac(sha1)",
1522		.cra_driver_name	= "omap-hmac-sha1",
1523		.cra_priority		= 400,
1524		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1525						CRYPTO_ALG_ASYNC |
1526						CRYPTO_ALG_NEED_FALLBACK,
1527		.cra_blocksize		= SHA1_BLOCK_SIZE,
1528		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1529					sizeof(struct omap_sham_hmac_ctx),
1530		.cra_alignmask		= OMAP_ALIGN_MASK,
1531		.cra_module		= THIS_MODULE,
1532		.cra_init		= omap_sham_cra_sha1_init,
1533		.cra_exit		= omap_sham_cra_exit,
1534	}
1535},
1536{
1537	.init		= omap_sham_init,
1538	.update		= omap_sham_update,
1539	.final		= omap_sham_final,
1540	.finup		= omap_sham_finup,
1541	.digest		= omap_sham_digest,
1542	.setkey		= omap_sham_setkey,
1543	.halg.digestsize	= MD5_DIGEST_SIZE,
1544	.halg.base	= {
1545		.cra_name		= "hmac(md5)",
1546		.cra_driver_name	= "omap-hmac-md5",
1547		.cra_priority		= 400,
1548		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1549						CRYPTO_ALG_ASYNC |
1550						CRYPTO_ALG_NEED_FALLBACK,
1551		.cra_blocksize		= SHA1_BLOCK_SIZE,
1552		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1553					sizeof(struct omap_sham_hmac_ctx),
1554		.cra_alignmask		= OMAP_ALIGN_MASK,
1555		.cra_module		= THIS_MODULE,
1556		.cra_init		= omap_sham_cra_md5_init,
1557		.cra_exit		= omap_sham_cra_exit,
1558	}
1559}
1560};
1561
1562/* OMAP4 has some algs in addition to what OMAP2 has */
1563static struct ahash_alg algs_sha224_sha256[] = {
1564{
1565	.init		= omap_sham_init,
1566	.update		= omap_sham_update,
1567	.final		= omap_sham_final,
1568	.finup		= omap_sham_finup,
1569	.digest		= omap_sham_digest,
1570	.halg.digestsize	= SHA224_DIGEST_SIZE,
1571	.halg.base	= {
1572		.cra_name		= "sha224",
1573		.cra_driver_name	= "omap-sha224",
1574		.cra_priority		= 400,
1575		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1576						CRYPTO_ALG_ASYNC |
1577						CRYPTO_ALG_NEED_FALLBACK,
1578		.cra_blocksize		= SHA224_BLOCK_SIZE,
1579		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1580		.cra_alignmask		= OMAP_ALIGN_MASK,
1581		.cra_module		= THIS_MODULE,
1582		.cra_init		= omap_sham_cra_init,
1583		.cra_exit		= omap_sham_cra_exit,
1584	}
1585},
1586{
1587	.init		= omap_sham_init,
1588	.update		= omap_sham_update,
1589	.final		= omap_sham_final,
1590	.finup		= omap_sham_finup,
1591	.digest		= omap_sham_digest,
1592	.halg.digestsize	= SHA256_DIGEST_SIZE,
1593	.halg.base	= {
1594		.cra_name		= "sha256",
1595		.cra_driver_name	= "omap-sha256",
1596		.cra_priority		= 400,
1597		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1598						CRYPTO_ALG_ASYNC |
1599						CRYPTO_ALG_NEED_FALLBACK,
1600		.cra_blocksize		= SHA256_BLOCK_SIZE,
1601		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1602		.cra_alignmask		= OMAP_ALIGN_MASK,
1603		.cra_module		= THIS_MODULE,
1604		.cra_init		= omap_sham_cra_init,
1605		.cra_exit		= omap_sham_cra_exit,
1606	}
1607},
1608{
1609	.init		= omap_sham_init,
1610	.update		= omap_sham_update,
1611	.final		= omap_sham_final,
1612	.finup		= omap_sham_finup,
1613	.digest		= omap_sham_digest,
1614	.setkey		= omap_sham_setkey,
1615	.halg.digestsize	= SHA224_DIGEST_SIZE,
1616	.halg.base	= {
1617		.cra_name		= "hmac(sha224)",
1618		.cra_driver_name	= "omap-hmac-sha224",
1619		.cra_priority		= 400,
1620		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1621						CRYPTO_ALG_ASYNC |
1622						CRYPTO_ALG_NEED_FALLBACK,
1623		.cra_blocksize		= SHA224_BLOCK_SIZE,
1624		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1625					sizeof(struct omap_sham_hmac_ctx),
1626		.cra_alignmask		= OMAP_ALIGN_MASK,
1627		.cra_module		= THIS_MODULE,
1628		.cra_init		= omap_sham_cra_sha224_init,
1629		.cra_exit		= omap_sham_cra_exit,
1630	}
1631},
1632{
1633	.init		= omap_sham_init,
1634	.update		= omap_sham_update,
1635	.final		= omap_sham_final,
1636	.finup		= omap_sham_finup,
1637	.digest		= omap_sham_digest,
1638	.setkey		= omap_sham_setkey,
1639	.halg.digestsize	= SHA256_DIGEST_SIZE,
1640	.halg.base	= {
1641		.cra_name		= "hmac(sha256)",
1642		.cra_driver_name	= "omap-hmac-sha256",
1643		.cra_priority		= 400,
1644		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1645						CRYPTO_ALG_ASYNC |
1646						CRYPTO_ALG_NEED_FALLBACK,
1647		.cra_blocksize		= SHA256_BLOCK_SIZE,
1648		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1649					sizeof(struct omap_sham_hmac_ctx),
1650		.cra_alignmask		= OMAP_ALIGN_MASK,
1651		.cra_module		= THIS_MODULE,
1652		.cra_init		= omap_sham_cra_sha256_init,
1653		.cra_exit		= omap_sham_cra_exit,
1654	}
1655},
1656};
1657
1658static struct ahash_alg algs_sha384_sha512[] = {
1659{
1660	.init		= omap_sham_init,
1661	.update		= omap_sham_update,
1662	.final		= omap_sham_final,
1663	.finup		= omap_sham_finup,
1664	.digest		= omap_sham_digest,
1665	.halg.digestsize	= SHA384_DIGEST_SIZE,
1666	.halg.base	= {
1667		.cra_name		= "sha384",
1668		.cra_driver_name	= "omap-sha384",
1669		.cra_priority		= 400,
1670		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1671						CRYPTO_ALG_ASYNC |
1672						CRYPTO_ALG_NEED_FALLBACK,
1673		.cra_blocksize		= SHA384_BLOCK_SIZE,
1674		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1675		.cra_alignmask		= OMAP_ALIGN_MASK,
1676		.cra_module		= THIS_MODULE,
1677		.cra_init		= omap_sham_cra_init,
1678		.cra_exit		= omap_sham_cra_exit,
1679	}
1680},
1681{
1682	.init		= omap_sham_init,
1683	.update		= omap_sham_update,
1684	.final		= omap_sham_final,
1685	.finup		= omap_sham_finup,
1686	.digest		= omap_sham_digest,
1687	.halg.digestsize	= SHA512_DIGEST_SIZE,
1688	.halg.base	= {
1689		.cra_name		= "sha512",
1690		.cra_driver_name	= "omap-sha512",
1691		.cra_priority		= 400,
1692		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1693						CRYPTO_ALG_ASYNC |
1694						CRYPTO_ALG_NEED_FALLBACK,
1695		.cra_blocksize		= SHA512_BLOCK_SIZE,
1696		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1697		.cra_alignmask		= OMAP_ALIGN_MASK,
1698		.cra_module		= THIS_MODULE,
1699		.cra_init		= omap_sham_cra_init,
1700		.cra_exit		= omap_sham_cra_exit,
1701	}
1702},
1703{
1704	.init		= omap_sham_init,
1705	.update		= omap_sham_update,
1706	.final		= omap_sham_final,
1707	.finup		= omap_sham_finup,
1708	.digest		= omap_sham_digest,
1709	.setkey		= omap_sham_setkey,
1710	.halg.digestsize	= SHA384_DIGEST_SIZE,
1711	.halg.base	= {
1712		.cra_name		= "hmac(sha384)",
1713		.cra_driver_name	= "omap-hmac-sha384",
1714		.cra_priority		= 400,
1715		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1716						CRYPTO_ALG_ASYNC |
1717						CRYPTO_ALG_NEED_FALLBACK,
1718		.cra_blocksize		= SHA384_BLOCK_SIZE,
1719		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1720					sizeof(struct omap_sham_hmac_ctx),
1721		.cra_alignmask		= OMAP_ALIGN_MASK,
1722		.cra_module		= THIS_MODULE,
1723		.cra_init		= omap_sham_cra_sha384_init,
1724		.cra_exit		= omap_sham_cra_exit,
1725	}
1726},
1727{
1728	.init		= omap_sham_init,
1729	.update		= omap_sham_update,
1730	.final		= omap_sham_final,
1731	.finup		= omap_sham_finup,
1732	.digest		= omap_sham_digest,
1733	.setkey		= omap_sham_setkey,
1734	.halg.digestsize	= SHA512_DIGEST_SIZE,
1735	.halg.base	= {
1736		.cra_name		= "hmac(sha512)",
1737		.cra_driver_name	= "omap-hmac-sha512",
1738		.cra_priority		= 400,
1739		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1740						CRYPTO_ALG_ASYNC |
1741						CRYPTO_ALG_NEED_FALLBACK,
1742		.cra_blocksize		= SHA512_BLOCK_SIZE,
1743		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1744					sizeof(struct omap_sham_hmac_ctx),
1745		.cra_alignmask		= OMAP_ALIGN_MASK,
1746		.cra_module		= THIS_MODULE,
1747		.cra_init		= omap_sham_cra_sha512_init,
1748		.cra_exit		= omap_sham_cra_exit,
1749	}
1750},
1751};
1752
1753static void omap_sham_done_task(unsigned long data)
1754{
1755	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
1756	int err = 0;
1757
1758	dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
1759
1760	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1761		omap_sham_handle_queue(dd, NULL);
1762		return;
1763	}
1764
1765	if (test_bit(FLAGS_CPU, &dd->flags)) {
1766		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
1767			goto finish;
1768	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1769		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1770			omap_sham_update_dma_stop(dd);
1771			if (dd->err) {
1772				err = dd->err;
1773				goto finish;
1774			}
1775		}
1776		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1777			/* hash or semi-hash ready */
1778			clear_bit(FLAGS_DMA_READY, &dd->flags);
1779			goto finish;
1780		}
1781	}
1782
1783	return;
1784
1785finish:
1786	dev_dbg(dd->dev, "update done: err: %d\n", err);
 1787	/* finish current request */
1788	omap_sham_finish_req(dd->req, err);
1789
1790	/* If we are not busy, process next req */
1791	if (!test_bit(FLAGS_BUSY, &dd->flags))
1792		omap_sham_handle_queue(dd, NULL);
1793}
1794
1795static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1796{
1797	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1798		dev_warn(dd->dev, "Interrupt when no active requests.\n");
1799	} else {
1800		set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1801		tasklet_schedule(&dd->done_task);
1802	}
1803
1804	return IRQ_HANDLED;
1805}
1806
1807static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
1808{
1809	struct omap_sham_dev *dd = dev_id;
1810
1811	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
1812		/* final -> allow device to go to power-saving mode */
1813		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1814
1815	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1816				 SHA_REG_CTRL_OUTPUT_READY);
1817	omap_sham_read(dd, SHA_REG_CTRL);
1818
1819	return omap_sham_irq_common(dd);
1820}
1821
1822static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
1823{
1824	struct omap_sham_dev *dd = dev_id;
1825
1826	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
1827
1828	return omap_sham_irq_common(dd);
1829}
1830
1831static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
1832	{
1833		.algs_list	= algs_sha1_md5,
1834		.size		= ARRAY_SIZE(algs_sha1_md5),
1835	},
1836};
1837
1838static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
1839	.algs_info	= omap_sham_algs_info_omap2,
1840	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
1841	.flags		= BIT(FLAGS_BE32_SHA1),
1842	.digest_size	= SHA1_DIGEST_SIZE,
1843	.copy_hash	= omap_sham_copy_hash_omap2,
1844	.write_ctrl	= omap_sham_write_ctrl_omap2,
1845	.trigger	= omap_sham_trigger_omap2,
1846	.poll_irq	= omap_sham_poll_irq_omap2,
1847	.intr_hdlr	= omap_sham_irq_omap2,
1848	.idigest_ofs	= 0x00,
1849	.din_ofs	= 0x1c,
1850	.digcnt_ofs	= 0x14,
1851	.rev_ofs	= 0x5c,
1852	.mask_ofs	= 0x60,
1853	.sysstatus_ofs	= 0x64,
1854	.major_mask	= 0xf0,
1855	.major_shift	= 4,
1856	.minor_mask	= 0x0f,
1857	.minor_shift	= 0,
1858};
1859
1860#ifdef CONFIG_OF
1861static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
1862	{
1863		.algs_list	= algs_sha1_md5,
1864		.size		= ARRAY_SIZE(algs_sha1_md5),
1865	},
1866	{
1867		.algs_list	= algs_sha224_sha256,
1868		.size		= ARRAY_SIZE(algs_sha224_sha256),
1869	},
1870};
1871
1872static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
1873	.algs_info	= omap_sham_algs_info_omap4,
1874	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
1875	.flags		= BIT(FLAGS_AUTO_XOR),
1876	.digest_size	= SHA256_DIGEST_SIZE,
1877	.copy_hash	= omap_sham_copy_hash_omap4,
1878	.write_ctrl	= omap_sham_write_ctrl_omap4,
1879	.trigger	= omap_sham_trigger_omap4,
1880	.poll_irq	= omap_sham_poll_irq_omap4,
1881	.intr_hdlr	= omap_sham_irq_omap4,
1882	.idigest_ofs	= 0x020,
1883	.odigest_ofs	= 0x0,
1884	.din_ofs	= 0x080,
1885	.digcnt_ofs	= 0x040,
1886	.rev_ofs	= 0x100,
1887	.mask_ofs	= 0x110,
1888	.sysstatus_ofs	= 0x114,
1889	.mode_ofs	= 0x44,
1890	.length_ofs	= 0x48,
1891	.major_mask	= 0x0700,
1892	.major_shift	= 8,
1893	.minor_mask	= 0x003f,
1894	.minor_shift	= 0,
1895};
1896
1897static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
1898	{
1899		.algs_list	= algs_sha1_md5,
1900		.size		= ARRAY_SIZE(algs_sha1_md5),
1901	},
1902	{
1903		.algs_list	= algs_sha224_sha256,
1904		.size		= ARRAY_SIZE(algs_sha224_sha256),
1905	},
1906	{
1907		.algs_list	= algs_sha384_sha512,
1908		.size		= ARRAY_SIZE(algs_sha384_sha512),
1909	},
1910};
1911
1912static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
1913	.algs_info	= omap_sham_algs_info_omap5,
1914	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
1915	.flags		= BIT(FLAGS_AUTO_XOR),
1916	.digest_size	= SHA512_DIGEST_SIZE,
1917	.copy_hash	= omap_sham_copy_hash_omap4,
1918	.write_ctrl	= omap_sham_write_ctrl_omap4,
1919	.trigger	= omap_sham_trigger_omap4,
1920	.poll_irq	= omap_sham_poll_irq_omap4,
1921	.intr_hdlr	= omap_sham_irq_omap4,
1922	.idigest_ofs	= 0x240,
1923	.odigest_ofs	= 0x200,
1924	.din_ofs	= 0x080,
1925	.digcnt_ofs	= 0x280,
1926	.rev_ofs	= 0x100,
1927	.mask_ofs	= 0x110,
1928	.sysstatus_ofs	= 0x114,
1929	.mode_ofs	= 0x284,
1930	.length_ofs	= 0x288,
1931	.major_mask	= 0x0700,
1932	.major_shift	= 8,
1933	.minor_mask	= 0x003f,
1934	.minor_shift	= 0,
1935};
1936
1937static const struct of_device_id omap_sham_of_match[] = {
1938	{
1939		.compatible	= "ti,omap2-sham",
1940		.data		= &omap_sham_pdata_omap2,
1941	},
1942	{
1943		.compatible	= "ti,omap3-sham",
1944		.data		= &omap_sham_pdata_omap2,
1945	},
1946	{
1947		.compatible	= "ti,omap4-sham",
1948		.data		= &omap_sham_pdata_omap4,
1949	},
1950	{
1951		.compatible	= "ti,omap5-sham",
1952		.data		= &omap_sham_pdata_omap5,
1953	},
1954	{},
1955};
1956MODULE_DEVICE_TABLE(of, omap_sham_of_match);
1957
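/*
 * Editor's note: a board's device tree would describe this IP roughly
 * as follows (register address, size and interrupt are illustrative
 * only; the compatible strings are the ones matched above):
 *
 *	sham: sham@4b100000 {
 *		compatible = "ti,omap4-sham";
 *		reg = <0x4b100000 0x300>;
 *		interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */
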
1958static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1959		struct device *dev, struct resource *res)
1960{
1961	struct device_node *node = dev->of_node;
1962	int err = 0;
1963
1964	dd->pdata = of_device_get_match_data(dev);
1965	if (!dd->pdata) {
1966		dev_err(dev, "no compatible OF match\n");
1967		err = -EINVAL;
1968		goto err;
1969	}
1970
1971	err = of_address_to_resource(node, 0, res);
1972	if (err < 0) {
1973		dev_err(dev, "can't translate OF node address\n");
1974		err = -EINVAL;
1975		goto err;
1976	}
1977
1978	dd->irq = irq_of_parse_and_map(node, 0);
1979	if (!dd->irq) {
1980		dev_err(dev, "can't translate OF irq value\n");
1981		err = -EINVAL;
1982		goto err;
1983	}
1984
1985err:
1986	return err;
1987}
1988#else
1989static const struct of_device_id omap_sham_of_match[] = {
1990	{},
1991};
1992
1993static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1994		struct device *dev, struct resource *res)
1995{
1996	return -EINVAL;
1997}
1998#endif
1999
2000static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
2001		struct platform_device *pdev, struct resource *res)
2002{
2003	struct device *dev = &pdev->dev;
2004	struct resource *r;
2005	int err = 0;
2006
2007	/* Get the base address */
2008	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2009	if (!r) {
2010		dev_err(dev, "no MEM resource info\n");
2011		err = -ENODEV;
2012		goto err;
2013	}
2014	memcpy(res, r, sizeof(*res));
2015
2016	/* Get the IRQ */
2017	dd->irq = platform_get_irq(pdev, 0);
2018	if (dd->irq < 0) {
2019		err = dd->irq;
2020		goto err;
2021	}
2022
2023	/* Only OMAP2/3 can be non-DT */
2024	dd->pdata = &omap_sham_pdata_omap2;
2025
2026err:
2027	return err;
2028}
2029
2030static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
2031			     char *buf)
2032{
2033	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2034
2035	return sprintf(buf, "%d\n", dd->fallback_sz);
2036}
2037
2038static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
2039			      const char *buf, size_t size)
2040{
2041	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2042	ssize_t status;
2043	long value;
2044
2045	status = kstrtol(buf, 0, &value);
2046	if (status)
2047		return status;
2048
2049	/* HW accelerator only works with buffers >= 9 */
2050	if (value < 9) {
2051		dev_err(dev, "minimum fallback size 9\n");
2052		return -EINVAL;
2053	}
2054
2055	dd->fallback_sz = value;
2056
2057	return size;
2058}
2059
2060static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
2061			      char *buf)
2062{
2063	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2064
2065	return sprintf(buf, "%d\n", dd->queue.max_qlen);
2066}
2067
2068static ssize_t queue_len_store(struct device *dev,
2069			       struct device_attribute *attr, const char *buf,
2070			       size_t size)
2071{
2072	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2073	ssize_t status;
2074	long value;
2075	unsigned long flags;
2076
2077	status = kstrtol(buf, 0, &value);
2078	if (status)
2079		return status;
2080
2081	if (value < 1)
2082		return -EINVAL;
2083
2084	/*
2085	 * Changing the queue size on the fly is safe. If the new size is
2086	 * smaller than the current size, the queue will simply not accept
2087	 * new entries until it has shrunk enough.
2088	 */
2089	spin_lock_irqsave(&dd->lock, flags);
2090	dd->queue.max_qlen = value;
2091	spin_unlock_irqrestore(&dd->lock, flags);
2092
2093	return size;
2094}
2095
2096static DEVICE_ATTR_RW(queue_len);
2097static DEVICE_ATTR_RW(fallback);
2098
2099static struct attribute *omap_sham_attrs[] = {
2100	&dev_attr_queue_len.attr,
2101	&dev_attr_fallback.attr,
2102	NULL,
2103};
2104
2105static struct attribute_group omap_sham_attr_group = {
2106	.attrs = omap_sham_attrs,
2107};
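/*
 * Editorial note: the attribute group above surfaces two writable sysfs
 * files on the platform device (the exact path below is illustrative):
 *
 *	echo 512 > /sys/devices/platform/<sham-device>/fallback
 *	echo 20  > /sys/devices/platform/<sham-device>/queue_len
 *
 * fallback_store() rejects values below 9 to match the hardware's
 * minimum buffer size, and queue_len_store() rejects values below 1.
 */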
2108
2109static int omap_sham_probe(struct platform_device *pdev)
2110{
2111	struct omap_sham_dev *dd;
2112	struct device *dev = &pdev->dev;
2113	struct resource res;
2114	dma_cap_mask_t mask;
2115	int err, i, j;
2116	u32 rev;
2117
2118	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
2119	if (dd == NULL) {
2120		dev_err(dev, "unable to alloc data struct.\n");
2121		err = -ENOMEM;
2122		goto data_err;
2123	}
2124	dd->dev = dev;
2125	platform_set_drvdata(pdev, dd);
2126
2127	INIT_LIST_HEAD(&dd->list);
2128	spin_lock_init(&dd->lock);
2129	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
2130	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
2131
2132	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
2133			       omap_sham_get_res_pdev(dd, pdev, &res);
2134	if (err)
2135		goto data_err;
2136
2137	dd->io_base = devm_ioremap_resource(dev, &res);
2138	if (IS_ERR(dd->io_base)) {
2139		err = PTR_ERR(dd->io_base);
2140		goto data_err;
2141	}
2142	dd->phys_base = res.start;
2143
2144	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
2145			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
2146	if (err) {
2147		dev_err(dev, "unable to request irq %d, err = %d\n",
2148			dd->irq, err);
2149		goto data_err;
2150	}
2151
2152	dma_cap_zero(mask);
2153	dma_cap_set(DMA_SLAVE, mask);
2154
2155	dd->dma_lch = dma_request_chan(dev, "rx");
2156	if (IS_ERR(dd->dma_lch)) {
2157		err = PTR_ERR(dd->dma_lch);
2158		if (err == -EPROBE_DEFER)
2159			goto data_err;
2160
2161		dd->polling_mode = 1;
2162		dev_dbg(dev, "using polling mode instead of dma\n");
2163	}
2164
2165	dd->flags |= dd->pdata->flags;
2166	sham.flags |= dd->pdata->flags;
2167
2168	pm_runtime_use_autosuspend(dev);
2169	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
2170
2171	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
2172
2173	pm_runtime_enable(dev);
2174	pm_runtime_irq_safe(dev);
2175
2176	err = pm_runtime_get_sync(dev);
2177	if (err < 0) {
2178		dev_err(dev, "failed to get sync: %d\n", err);
2179		goto err_pm;
2180	}
2181
2182	rev = omap_sham_read(dd, SHA_REG_REV(dd));
2183	pm_runtime_put_sync(&pdev->dev);
2184
2185	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
2186		(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
2187		(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
2188
2189	spin_lock(&sham.lock);
2190	list_add_tail(&dd->list, &sham.dev_list);
2191	spin_unlock(&sham.lock);
2192
2193	for (i = 0; i < dd->pdata->algs_info_size; i++) {
2194		if (dd->pdata->algs_info[i].registered)
2195			break;
2196
2197		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
2198			struct ahash_alg *alg;
2199
2200			alg = &dd->pdata->algs_info[i].algs_list[j];
2201			alg->export = omap_sham_export;
2202			alg->import = omap_sham_import;
2203			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
2204					      BUFLEN;
2205			err = crypto_register_ahash(alg);
2206			if (err)
2207				goto err_algs;
2208
2209			dd->pdata->algs_info[i].registered++;
2210		}
2211	}
2212
2213	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
2214	if (err) {
2215		dev_err(dev, "could not create sysfs device attrs\n");
2216		goto err_algs;
2217	}
2218
2219	return 0;
2220
2221err_algs:
2222	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2223		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2224			crypto_unregister_ahash(
2225					&dd->pdata->algs_info[i].algs_list[j]);
2226err_pm:
2227	pm_runtime_disable(dev);
2228	if (!dd->polling_mode)
2229		dma_release_channel(dd->dma_lch);
2230data_err:
2231	dev_err(dev, "initialization failed.\n");
2232
2233	return err;
2234}
2235
2236static int omap_sham_remove(struct platform_device *pdev)
2237{
2238	struct omap_sham_dev *dd;
2239	int i, j;
2240
2241	dd = platform_get_drvdata(pdev);
2242	if (!dd)
2243		return -ENODEV;
2244	spin_lock(&sham.lock);
2245	list_del(&dd->list);
2246	spin_unlock(&sham.lock);
2247	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2248		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
2249			crypto_unregister_ahash(
2250					&dd->pdata->algs_info[i].algs_list[j]);
2251			dd->pdata->algs_info[i].registered--;
2252		}
2253	tasklet_kill(&dd->done_task);
2254	pm_runtime_disable(&pdev->dev);
2255
2256	if (!dd->polling_mode)
2257		dma_release_channel(dd->dma_lch);
2258
2259	sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
2260
2261	return 0;
2262}
2263
2264#ifdef CONFIG_PM_SLEEP
2265static int omap_sham_suspend(struct device *dev)
2266{
2267	pm_runtime_put_sync(dev);
2268	return 0;
2269}
2270
2271static int omap_sham_resume(struct device *dev)
2272{
2273	int err = pm_runtime_get_sync(dev);
2274	if (err < 0) {
2275		dev_err(dev, "failed to get sync: %d\n", err);
2276		return err;
2277	}
2278	return 0;
2279}
2280#endif
2281
2282static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
2283
2284static struct platform_driver omap_sham_driver = {
2285	.probe	= omap_sham_probe,
2286	.remove	= omap_sham_remove,
2287	.driver	= {
2288		.name	= "omap-sham",
2289		.pm	= &omap_sham_pm_ops,
2290		.of_match_table	= omap_sham_of_match,
2291	},
2292};
2293
2294module_platform_driver(omap_sham_driver);
2295
2296MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
2297MODULE_LICENSE("GPL v2");
2298MODULE_AUTHOR("Dmitry Kasatkin");
2299MODULE_ALIAS("platform:omap-sham");
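/*
 * Editorial usage sketch, not part of the driver: how a kernel consumer
 * might exercise the hashes registered above through the generic ahash
 * API. Once this driver has probed, "sha1" resolves to "omap-sha1"
 * (priority 400) on OMAP hardware. example_sha1() is a hypothetical
 * helper; 'data' must be DMA-able (e.g. kmalloc'ed), not on the stack.
 */
#include <linux/scatterlist.h>
#include <crypto/hash.h>

static int example_sha1(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* crypto_wait_req() turns the async completion into a sync call */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}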
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for OMAP SHA1/MD5 HW acceleration.
   6 *
   7 * Copyright (c) 2010 Nokia Corporation
   8 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
   9 * Copyright (c) 2011 Texas Instruments Incorporated
  10 *
  11 * Some ideas are from old omap-sha1-md5.c driver.
  12 */
  13
  14#define pr_fmt(fmt) "%s: " fmt, __func__
  15
  16#include <linux/err.h>
  17#include <linux/device.h>
  18#include <linux/module.h>
  19#include <linux/init.h>
  20#include <linux/errno.h>
  21#include <linux/interrupt.h>
  22#include <linux/kernel.h>
  23#include <linux/irq.h>
  24#include <linux/io.h>
  25#include <linux/platform_device.h>
  26#include <linux/scatterlist.h>
  27#include <linux/dma-mapping.h>
  28#include <linux/dmaengine.h>
  29#include <linux/pm_runtime.h>
  30#include <linux/of.h>
  31#include <linux/of_device.h>
  32#include <linux/of_address.h>
  33#include <linux/of_irq.h>
  34#include <linux/delay.h>
  35#include <linux/crypto.h>
  36#include <crypto/scatterwalk.h>
  37#include <crypto/algapi.h>
  38#include <crypto/sha1.h>
  39#include <crypto/sha2.h>
  40#include <crypto/hash.h>
  41#include <crypto/hmac.h>
  42#include <crypto/internal/hash.h>
  43#include <crypto/engine.h>
  44
  45#define MD5_DIGEST_SIZE			16
  46
  47#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
  48#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
  49#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)
  50
  51#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + (x * 0x04))
  52
  53#define SHA_REG_CTRL			0x18
  54#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
  55#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
  56#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
  57#define SHA_REG_CTRL_ALGO		(1 << 2)
  58#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
  59#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)
  60
  61#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)
  62
  63#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
  64#define SHA_REG_MASK_DMA_EN		(1 << 3)
  65#define SHA_REG_MASK_IT_EN		(1 << 2)
  66#define SHA_REG_MASK_SOFTRESET		(1 << 1)
  67#define SHA_REG_AUTOIDLE		(1 << 0)
  68
  69#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
  70#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)
  71
  72#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
  73#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
  74#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
  75#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
  76#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)
  77
  78#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
  79#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
  80#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
  81#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
  82#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
  83#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
  84#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)
  85
  86#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)
  87
  88#define SHA_REG_IRQSTATUS		0x118
  89#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
  90#define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
  91#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
  92#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)
  93
  94#define SHA_REG_IRQENA			0x11C
  95#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
  96#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
  97#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
  98#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)
  99
 100#define DEFAULT_TIMEOUT_INTERVAL	HZ
 101
 102#define DEFAULT_AUTOSUSPEND_DELAY	1000
 103
 104/* mostly device flags */
 105#define FLAGS_FINAL		1
 106#define FLAGS_DMA_ACTIVE	2
 107#define FLAGS_OUTPUT_READY	3
 108#define FLAGS_INIT		4
 109#define FLAGS_CPU		5
 110#define FLAGS_DMA_READY		6
 111#define FLAGS_AUTO_XOR		7
 112#define FLAGS_BE32_SHA1		8
 113#define FLAGS_SGS_COPIED	9
 114#define FLAGS_SGS_ALLOCED	10
 115#define FLAGS_HUGE		11
 116
 117/* context flags */
 118#define FLAGS_FINUP		16
 119
 120#define FLAGS_MODE_SHIFT	18
 121#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	<< FLAGS_MODE_SHIFT)
 122#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
 123#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
 124#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
 125#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
 126#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
 127#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
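/*
 * Editorial note: a worked example of the packing above. For SHA-256,
 * SHA_REG_MODE_ALGO_SHA2_256 is (3 << 1) = 0x6, so FLAGS_MODE_SHA256 is
 * 0x6 << 18 = 0x180000. omap_sham_write_ctrl_omap4() later recovers the
 * hardware encoding with (flags & FLAGS_MODE_MASK) >> FLAGS_MODE_SHIFT.
 */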
 128
 129#define FLAGS_HMAC		21
 130#define FLAGS_ERROR		22
 131
 132#define OP_UPDATE		1
 133#define OP_FINAL		2
 134
 135#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
 136#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
 137
 138#define BUFLEN			SHA512_BLOCK_SIZE
 139#define OMAP_SHA_DMA_THRESHOLD	256
 140
 141#define OMAP_SHA_MAX_DMA_LEN	(1024 * 2048)
 142
 143struct omap_sham_dev;
 144
 145struct omap_sham_reqctx {
 146	struct omap_sham_dev	*dd;
 147	unsigned long		flags;
 148	u8			op;
 149
 150	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
 151	size_t			digcnt;
 152	size_t			bufcnt;
 153	size_t			buflen;
 154
 155	/* walk state */
 156	struct scatterlist	*sg;
 157	struct scatterlist	sgl[2];
 158	int			offset;	/* offset in current sg */
 159	int			sg_len;
 160	unsigned int		total;	/* total request */
 161
 162	u8			buffer[] OMAP_ALIGNED;
 163};
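/*
 * Editorial note: buffer[] above is a flexible array member. Its BUFLEN
 * bytes of storage come from the request context size that
 * omap_sham_cra_init_alg() reserves via crypto_ahash_set_reqsize(),
 * i.e. sizeof(struct omap_sham_reqctx) + BUFLEN.
 */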
 164
 165struct omap_sham_hmac_ctx {
 166	struct crypto_shash	*shash;
 167	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
 168	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
 169};
 170
 171struct omap_sham_ctx {
 172	struct crypto_engine_ctx	enginectx;
 173	unsigned long		flags;
 174
 175	/* fallback stuff */
 176	struct crypto_shash	*fallback;
 177
 178	struct omap_sham_hmac_ctx base[];
 179};
 180
 181#define OMAP_SHAM_QUEUE_LENGTH	10
 182
 183struct omap_sham_algs_info {
 184	struct ahash_alg	*algs_list;
 185	unsigned int		size;
 186	unsigned int		registered;
 187};
 188
 189struct omap_sham_pdata {
 190	struct omap_sham_algs_info	*algs_info;
 191	unsigned int	algs_info_size;
 192	unsigned long	flags;
 193	int		digest_size;
 194
 195	void		(*copy_hash)(struct ahash_request *req, int out);
 196	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
 197				      int final, int dma);
 198	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
 199	int		(*poll_irq)(struct omap_sham_dev *dd);
 200	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);
 201
 202	u32		odigest_ofs;
 203	u32		idigest_ofs;
 204	u32		din_ofs;
 205	u32		digcnt_ofs;
 206	u32		rev_ofs;
 207	u32		mask_ofs;
 208	u32		sysstatus_ofs;
 209	u32		mode_ofs;
 210	u32		length_ofs;
 211
 212	u32		major_mask;
 213	u32		major_shift;
 214	u32		minor_mask;
 215	u32		minor_shift;
 216};
 217
 218struct omap_sham_dev {
 219	struct list_head	list;
 220	unsigned long		phys_base;
 221	struct device		*dev;
 222	void __iomem		*io_base;
 223	int			irq;
 224	int			err;
 225	struct dma_chan		*dma_lch;
 226	struct tasklet_struct	done_task;
 227	u8			polling_mode;
 228	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;
 229
 230	unsigned long		flags;
 231	int			fallback_sz;
 232	struct crypto_queue	queue;
 233	struct ahash_request	*req;
 234	struct crypto_engine	*engine;
 235
 236	const struct omap_sham_pdata	*pdata;
 237};
 238
 239struct omap_sham_drv {
 240	struct list_head	dev_list;
 241	spinlock_t		lock;
 242	unsigned long		flags;
 243};
 244
 245static struct omap_sham_drv sham = {
 246	.dev_list = LIST_HEAD_INIT(sham.dev_list),
 247	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
 248};
 249
 250static int omap_sham_enqueue(struct ahash_request *req, unsigned int op);
 251static void omap_sham_finish_req(struct ahash_request *req, int err);
 252
 253static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
 254{
 255	return __raw_readl(dd->io_base + offset);
 256}
 257
 258static inline void omap_sham_write(struct omap_sham_dev *dd,
 259					u32 offset, u32 value)
 260{
 261	__raw_writel(value, dd->io_base + offset);
 262}
 263
 264static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
 265					u32 value, u32 mask)
 266{
 267	u32 val;
 268
 269	val = omap_sham_read(dd, address);
 270	val &= ~mask;
 271	val |= value;
 272	omap_sham_write(dd, address, val);
 273}
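/*
 * Editorial note: omap_sham_write_mask() is a read-modify-write helper;
 * only the bits set in 'mask' are replaced by 'value'. For example, the
 * control paths below enable the interrupt and, optionally, DMA with:
 *
 *	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
 *		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
 *		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
 */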
 274
 275static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
 276{
 277	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
 278
 279	while (!(omap_sham_read(dd, offset) & bit)) {
 280		if (time_is_before_jiffies(timeout))
 281			return -ETIMEDOUT;
 282	}
 283
 284	return 0;
 285}
 286
 287static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
 288{
 289	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 290	struct omap_sham_dev *dd = ctx->dd;
 291	u32 *hash = (u32 *)ctx->digest;
 292	int i;
 293
 294	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
 295		if (out)
 296			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
 297		else
 298			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
 299	}
 300}
 301
 302static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
 303{
 304	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 305	struct omap_sham_dev *dd = ctx->dd;
 306	int i;
 307
 308	if (ctx->flags & BIT(FLAGS_HMAC)) {
 309		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
 310		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 311		struct omap_sham_hmac_ctx *bctx = tctx->base;
 312		u32 *opad = (u32 *)bctx->opad;
 313
 314		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
 315			if (out)
 316				opad[i] = omap_sham_read(dd,
 317						SHA_REG_ODIGEST(dd, i));
 318			else
 319				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
 320						opad[i]);
 321		}
 322	}
 323
 324	omap_sham_copy_hash_omap2(req, out);
 325}
 326
 327static void omap_sham_copy_ready_hash(struct ahash_request *req)
 328{
 329	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 330	u32 *in = (u32 *)ctx->digest;
 331	u32 *hash = (u32 *)req->result;
 332	int i, d, big_endian = 0;
 333
 334	if (!hash)
 335		return;
 336
 337	switch (ctx->flags & FLAGS_MODE_MASK) {
 338	case FLAGS_MODE_MD5:
 339		d = MD5_DIGEST_SIZE / sizeof(u32);
 340		break;
 341	case FLAGS_MODE_SHA1:
 342		/* OMAP2 SHA1 is big endian */
 343		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
 344			big_endian = 1;
 345		d = SHA1_DIGEST_SIZE / sizeof(u32);
 346		break;
 347	case FLAGS_MODE_SHA224:
 348		d = SHA224_DIGEST_SIZE / sizeof(u32);
 349		break;
 350	case FLAGS_MODE_SHA256:
 351		d = SHA256_DIGEST_SIZE / sizeof(u32);
 352		break;
 353	case FLAGS_MODE_SHA384:
 354		d = SHA384_DIGEST_SIZE / sizeof(u32);
 355		break;
 356	case FLAGS_MODE_SHA512:
 357		d = SHA512_DIGEST_SIZE / sizeof(u32);
 358		break;
 359	default:
 360		d = 0;
 361	}
 362
 363	if (big_endian)
 364		for (i = 0; i < d; i++)
 365			hash[i] = be32_to_cpup((__be32 *)in + i);
 366	else
 367		for (i = 0; i < d; i++)
 368			hash[i] = le32_to_cpup((__le32 *)in + i);
 369}
 370
 371static int omap_sham_hw_init(struct omap_sham_dev *dd)
 372{
 373	int err;
 374
 375	err = pm_runtime_resume_and_get(dd->dev);
 376	if (err < 0) {
 377		dev_err(dd->dev, "failed to get sync: %d\n", err);
 378		return err;
 379	}
 380
 381	if (!test_bit(FLAGS_INIT, &dd->flags)) {
 382		set_bit(FLAGS_INIT, &dd->flags);
 383		dd->err = 0;
 384	}
 385
 386	return 0;
 387}
 388
 389static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
 390				 int final, int dma)
 391{
 392	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 393	u32 val = length << 5, mask;
 394
 395	if (likely(ctx->digcnt))
 396		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
 397
 398	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
 399		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
 400		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
 401	/*
 402	 * Set ALGO_CONST only for the first iteration
 403	 * and CLOSE_HASH only for the last one.
 404	 */
 405	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
 406		val |= SHA_REG_CTRL_ALGO;
 407	if (!ctx->digcnt)
 408		val |= SHA_REG_CTRL_ALGO_CONST;
 409	if (final)
 410		val |= SHA_REG_CTRL_CLOSE_HASH;
 411
 412	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
 413			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 414
 415	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
 416}
 417
 418static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
 419{
 420}
 421
 422static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
 423{
 424	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
 425}
 426
 427static int get_block_size(struct omap_sham_reqctx *ctx)
 428{
 429	int d;
 430
 431	switch (ctx->flags & FLAGS_MODE_MASK) {
 432	case FLAGS_MODE_MD5:
 433	case FLAGS_MODE_SHA1:
 434		d = SHA1_BLOCK_SIZE;
 435		break;
 436	case FLAGS_MODE_SHA224:
 437	case FLAGS_MODE_SHA256:
 438		d = SHA256_BLOCK_SIZE;
 439		break;
 440	case FLAGS_MODE_SHA384:
 441	case FLAGS_MODE_SHA512:
 442		d = SHA512_BLOCK_SIZE;
 443		break;
 444	default:
 445		d = 0;
 446	}
 447
 448	return d;
 449}
 450
 451static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
 452				    u32 *value, int count)
 453{
 454	for (; count--; value++, offset += 4)
 455		omap_sham_write(dd, offset, *value);
 456}
 457
 458static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
 459				 int final, int dma)
 460{
 461	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 462	u32 val, mask;
 463
 464	if (likely(ctx->digcnt))
 465		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
 466
 467	/*
 468	 * Set ALGO_CONST only for the first iteration and
 469	 * CLOSE_HASH only for the last one. Note that the flags mode bits
 470	 * correspond to the algorithm encoding in the mode register.
 471	 */
 472	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
 473	if (!ctx->digcnt) {
 474		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
 475		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 476		struct omap_sham_hmac_ctx *bctx = tctx->base;
 477		int bs, nr_dr;
 478
 479		val |= SHA_REG_MODE_ALGO_CONSTANT;
 480
 481		if (ctx->flags & BIT(FLAGS_HMAC)) {
 482			bs = get_block_size(ctx);
 483			nr_dr = bs / (2 * sizeof(u32));
 484			val |= SHA_REG_MODE_HMAC_KEY_PROC;
 485			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
 486					  (u32 *)bctx->ipad, nr_dr);
 487			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
 488					  (u32 *)bctx->ipad + nr_dr, nr_dr);
 489			ctx->digcnt += bs;
 490		}
 491	}
 492
 493	if (final) {
 494		val |= SHA_REG_MODE_CLOSE_HASH;
 495
 496		if (ctx->flags & BIT(FLAGS_HMAC))
 497			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
 498	}
 499
 500	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
 501	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
 502	       SHA_REG_MODE_HMAC_KEY_PROC;
 503
 504	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
 505	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
 506	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
 507	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
 508			     SHA_REG_MASK_IT_EN |
 509				     (dma ? SHA_REG_MASK_DMA_EN : 0),
 510			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
 511}
 512
 513static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
 514{
 515	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
 516}
 517
 518static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
 519{
 520	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
 521			      SHA_REG_IRQSTATUS_INPUT_RDY);
 522}
 523
 524static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
 525			      int final)
 526{
 527	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 528	int count, len32, bs32, offset = 0;
 529	const u32 *buffer;
 530	int mlen;
 531	struct sg_mapping_iter mi;
 532
 533	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n",
 534						ctx->digcnt, length, final);
 535
 536	dd->pdata->write_ctrl(dd, length, final, 0);
 537	dd->pdata->trigger(dd, length);
 538
 539	/* must be non-zero before the next lines so clocks can be disabled later */
 540	ctx->digcnt += length;
 541	ctx->total -= length;
 542
 543	if (final)
 544		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 545
 546	set_bit(FLAGS_CPU, &dd->flags);
 547
 548	len32 = DIV_ROUND_UP(length, sizeof(u32));
 549	bs32 = get_block_size(ctx) / sizeof(u32);
 550
 551	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
 552		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
 553
 554	mlen = 0;
 555
 556	while (len32) {
 557		if (dd->pdata->poll_irq(dd))
 558			return -ETIMEDOUT;
 559
 560		for (count = 0; count < min(len32, bs32); count++, offset++) {
 561			if (!mlen) {
 562				sg_miter_next(&mi);
 563				mlen = mi.length;
 564				if (!mlen) {
 565					pr_err("sg miter failure.\n");
 566					return -EINVAL;
 567				}
 568				offset = 0;
 569				buffer = mi.addr;
 570			}
 571			omap_sham_write(dd, SHA_REG_DIN(dd, count),
 572					buffer[offset]);
 573			mlen -= 4;
 574		}
 575		len32 -= min(len32, bs32);
 576	}
 577
 578	sg_miter_stop(&mi);
 579
 580	return -EINPROGRESS;
 581}
 582
 583static void omap_sham_dma_callback(void *param)
 584{
 585	struct omap_sham_dev *dd = param;
 586
 587	set_bit(FLAGS_DMA_READY, &dd->flags);
 588	tasklet_schedule(&dd->done_task);
 589}
 590
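/*
 * Editorial note: omap_sham_xmit_dma() below follows the standard
 * dmaengine slave sequence: dma_map_sg(), dmaengine_slave_config()
 * (the destination is the DIN FIFO, with bursts sized to one hash
 * block), dmaengine_prep_slave_sg(), dmaengine_submit() and
 * dma_async_issue_pending(); ->trigger() then writes the length, which
 * starts the transfer on OMAP4-class hardware (it is a no-op on OMAP2).
 */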
 591static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
 592			      int final)
 593{
 594	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 595	struct dma_async_tx_descriptor *tx;
 596	struct dma_slave_config cfg;
 597	int ret;
 598
 599	dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n",
 600						ctx->digcnt, length, final);
 601
 602	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
 603		dev_err(dd->dev, "dma_map_sg error\n");
 604		return -EINVAL;
 605	}
 606
 607	memset(&cfg, 0, sizeof(cfg));
 608
 609	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
 610	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 611	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
 612
 613	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
 614	if (ret) {
 615		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
 616		return ret;
 617	}
 618
 619	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
 620				     DMA_MEM_TO_DEV,
 621				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 622
 623	if (!tx) {
 624		dev_err(dd->dev, "prep_slave_sg failed\n");
 625		return -EINVAL;
 626	}
 627
 628	tx->callback = omap_sham_dma_callback;
 629	tx->callback_param = dd;
 630
 631	dd->pdata->write_ctrl(dd, length, final, 1);
 632
 633	ctx->digcnt += length;
 634	ctx->total -= length;
 635
 636	if (final)
 637		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 638
 639	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 640
 641	dmaengine_submit(tx);
 642	dma_async_issue_pending(dd->dma_lch);
 643
 644	dd->pdata->trigger(dd, length);
 645
 646	return -EINPROGRESS;
 647}
 648
 649static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
 650				   struct scatterlist *sg, int bs, int new_len)
 651{
 652	int n = sg_nents(sg);
 653	struct scatterlist *tmp;
 654	int offset = ctx->offset;
 655
 656	ctx->total = new_len;
 657
 658	if (ctx->bufcnt)
 659		n++;
 660
 661	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
 662	if (!ctx->sg)
 663		return -ENOMEM;
 664
 665	sg_init_table(ctx->sg, n);
 666
 667	tmp = ctx->sg;
 668
 669	ctx->sg_len = 0;
 670
 671	if (ctx->bufcnt) {
 672		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
 673		tmp = sg_next(tmp);
 674		ctx->sg_len++;
 675		new_len -= ctx->bufcnt;
 676	}
 677
 678	while (sg && new_len) {
 679		int len = sg->length - offset;
 680
 681		if (len <= 0) {
 682			offset -= sg->length;
 683			sg = sg_next(sg);
 684			continue;
 685		}
 686
 687		if (new_len < len)
 688			len = new_len;
 689
 690		if (len > 0) {
 691			new_len -= len;
 692			sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
 693			offset = 0;
 694			ctx->offset = 0;
 695			ctx->sg_len++;
 696			if (new_len <= 0)
 697				break;
 698			tmp = sg_next(tmp);
 699		}
 700
 701		sg = sg_next(sg);
 702	}
 703
 704	if (tmp)
 705		sg_mark_end(tmp);
 706
 707	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
 708
 709	ctx->offset += new_len - ctx->bufcnt;
 710	ctx->bufcnt = 0;
 711
 712	return 0;
 713}
 714
 715static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
 716			      struct scatterlist *sg, int bs,
 717			      unsigned int new_len)
 718{
 719	int pages;
 720	void *buf;
 721
 722	pages = get_order(new_len);
 723
 724	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
 725	if (!buf) {
 726		pr_err("Couldn't allocate pages for unaligned cases.\n");
 727		return -ENOMEM;
 728	}
 729
 730	if (ctx->bufcnt)
 731		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
 732
 733	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
 734				 min(new_len, ctx->total) - ctx->bufcnt, 0);
 735	sg_init_table(ctx->sgl, 1);
 736	sg_set_buf(ctx->sgl, buf, new_len);
 737	ctx->sg = ctx->sgl;
 738	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
 739	ctx->sg_len = 1;
 740	ctx->offset += new_len - ctx->bufcnt;
 741	ctx->bufcnt = 0;
 742	ctx->total = new_len;
 743
 744	return 0;
 745}
 746
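/*
 * Editorial note: omap_sham_align_sgs() below picks one of three ways
 * to feed the hardware: (1) data that is not 32-bit aligned or not a
 * whole number of blocks is flattened into freshly allocated pages
 * (omap_sham_copy_sgs()); (2) usable pages in an awkward layout get a
 * rebuilt scatterlist (omap_sham_copy_sg_lists()); (3) an already
 * suitable list is used as-is, optionally chained behind the carry
 * buffer in xmit_buf.
 */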
 747static int omap_sham_align_sgs(struct scatterlist *sg,
 748			       int nbytes, int bs, bool final,
 749			       struct omap_sham_reqctx *rctx)
 750{
 751	int n = 0;
 752	bool aligned = true;
 753	bool list_ok = true;
 754	struct scatterlist *sg_tmp = sg;
 755	int new_len;
 756	int offset = rctx->offset;
 757	int bufcnt = rctx->bufcnt;
 758
 759	if (!sg || !sg->length || !nbytes) {
 760		if (bufcnt) {
 761			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
 762			sg_init_table(rctx->sgl, 1);
 763			sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
 764			rctx->sg = rctx->sgl;
 765			rctx->sg_len = 1;
 766		}
 767
 768		return 0;
 769	}
 770
 771	new_len = nbytes;
 772
 773	if (offset)
 774		list_ok = false;
 775
 776	if (final)
 777		new_len = DIV_ROUND_UP(new_len, bs) * bs;
 778	else
 779		new_len = (new_len - 1) / bs * bs;
 780
 781	if (!new_len)
 782		return 0;
 783
 784	if (nbytes != new_len)
 785		list_ok = false;
 786
 787	while (nbytes > 0 && sg_tmp) {
 788		n++;
 789
 790		if (bufcnt) {
 791			if (!IS_ALIGNED(bufcnt, bs)) {
 792				aligned = false;
 793				break;
 794			}
 795			nbytes -= bufcnt;
 796			bufcnt = 0;
 797			if (!nbytes)
 798				list_ok = false;
 799
 800			continue;
 801		}
 802
 803#ifdef CONFIG_ZONE_DMA
 804		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
 805			aligned = false;
 806			break;
 807		}
 808#endif
 809
 810		if (offset < sg_tmp->length) {
 811			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
 812				aligned = false;
 813				break;
 814			}
 815
 816			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
 817				aligned = false;
 818				break;
 819			}
 820		}
 821
 822		if (offset) {
 823			offset -= sg_tmp->length;
 824			if (offset < 0) {
 825				nbytes += offset;
 826				offset = 0;
 827			}
 828		} else {
 829			nbytes -= sg_tmp->length;
 830		}
 831
 832		sg_tmp = sg_next(sg_tmp);
 833
 834		if (nbytes < 0) {
 835			list_ok = false;
 836			break;
 837		}
 838	}
 839
 840	if (new_len > OMAP_SHA_MAX_DMA_LEN) {
 841		new_len = OMAP_SHA_MAX_DMA_LEN;
 842		aligned = false;
 843	}
 844
 845	if (!aligned)
 846		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
 847	else if (!list_ok)
 848		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
 849
 850	rctx->total = new_len;
 851	rctx->offset += new_len;
 852	rctx->sg_len = n;
 853	if (rctx->bufcnt) {
 854		sg_init_table(rctx->sgl, 2);
 855		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
 856		sg_chain(rctx->sgl, 2, sg);
 857		rctx->sg = rctx->sgl;
 858	} else {
 859		rctx->sg = sg;
 860	}
 861
 862	return 0;
 863}
 864
 865static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
 866{
 867	struct ahash_request *req = container_of(areq, struct ahash_request,
 868						 base);
 869	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
 870	int bs;
 871	int ret;
 872	unsigned int nbytes;
 873	bool final = rctx->flags & BIT(FLAGS_FINUP);
 874	bool update = rctx->op == OP_UPDATE;
 875	int hash_later;
 876
 877	bs = get_block_size(rctx);
 878
 879	nbytes = rctx->bufcnt;
 880
 881	if (update)
 882		nbytes += req->nbytes - rctx->offset;
 883
 884	dev_dbg(rctx->dd->dev,
 885		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n",
 886		__func__, nbytes, bs, rctx->total, rctx->offset,
 887		rctx->bufcnt);
 888
 889	if (!nbytes)
 890		return 0;
 891
 892	rctx->total = nbytes;
 893
 894	if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
 895		int len = bs - rctx->bufcnt % bs;
 896
 897		if (len > req->nbytes)
 898			len = req->nbytes;
 899		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
 900					 0, len, 0);
 901		rctx->bufcnt += len;
 902		rctx->offset = len;
 903	}
 904
 905	if (rctx->bufcnt)
 906		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
 907
 908	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
 909	if (ret)
 910		return ret;
 911
 912	hash_later = nbytes - rctx->total;
 913	if (hash_later < 0)
 914		hash_later = 0;
 915
 916	if (hash_later && hash_later <= rctx->buflen) {
 917		scatterwalk_map_and_copy(rctx->buffer,
 918					 req->src,
 919					 req->nbytes - hash_later,
 920					 hash_later, 0);
 921
 922		rctx->bufcnt = hash_later;
 923	} else {
 924		rctx->bufcnt = 0;
 925	}
 926
 927	if (hash_later > rctx->buflen)
 928		set_bit(FLAGS_HUGE, &rctx->dd->flags);
 929
 930	rctx->total = min(nbytes, rctx->total);
 931
 932	return 0;
 933}
 934
 935static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 936{
 937	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 938
 939	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
 940
 941	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 942
 943	return 0;
 944}
 945
 946static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
 947{
 948	struct omap_sham_dev *dd;
 949
 950	if (ctx->dd)
 951		return ctx->dd;
 952
 953	spin_lock_bh(&sham.lock);
 954	dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
 955	list_move_tail(&dd->list, &sham.dev_list);
 956	ctx->dd = dd;
 957	spin_unlock_bh(&sham.lock);
 958
 959	return dd;
 960}
 961
 962static int omap_sham_init(struct ahash_request *req)
 963{
 964	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 965	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 966	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 967	struct omap_sham_dev *dd;
 968	int bs = 0;
 969
 970	ctx->dd = NULL;
 971
 972	dd = omap_sham_find_dev(ctx);
 973	if (!dd)
 974		return -ENODEV;
 975
 976	ctx->flags = 0;
 977
 978	dev_dbg(dd->dev, "init: digest size: %d\n",
 979		crypto_ahash_digestsize(tfm));
 980
 981	switch (crypto_ahash_digestsize(tfm)) {
 982	case MD5_DIGEST_SIZE:
 983		ctx->flags |= FLAGS_MODE_MD5;
 984		bs = SHA1_BLOCK_SIZE;
 985		break;
 986	case SHA1_DIGEST_SIZE:
 987		ctx->flags |= FLAGS_MODE_SHA1;
 988		bs = SHA1_BLOCK_SIZE;
 989		break;
 990	case SHA224_DIGEST_SIZE:
 991		ctx->flags |= FLAGS_MODE_SHA224;
 992		bs = SHA224_BLOCK_SIZE;
 993		break;
 994	case SHA256_DIGEST_SIZE:
 995		ctx->flags |= FLAGS_MODE_SHA256;
 996		bs = SHA256_BLOCK_SIZE;
 997		break;
 998	case SHA384_DIGEST_SIZE:
 999		ctx->flags |= FLAGS_MODE_SHA384;
1000		bs = SHA384_BLOCK_SIZE;
1001		break;
1002	case SHA512_DIGEST_SIZE:
1003		ctx->flags |= FLAGS_MODE_SHA512;
1004		bs = SHA512_BLOCK_SIZE;
1005		break;
1006	}
1007
1008	ctx->bufcnt = 0;
1009	ctx->digcnt = 0;
1010	ctx->total = 0;
1011	ctx->offset = 0;
1012	ctx->buflen = BUFLEN;
1013
1014	if (tctx->flags & BIT(FLAGS_HMAC)) {
1015		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
1016			struct omap_sham_hmac_ctx *bctx = tctx->base;
1017
1018			memcpy(ctx->buffer, bctx->ipad, bs);
1019			ctx->bufcnt = bs;
1020		}
1021
1022		ctx->flags |= BIT(FLAGS_HMAC);
1023	}
1024
1025	return 0;
1026
1027}
1028
1029static int omap_sham_update_req(struct omap_sham_dev *dd)
1030{
1031	struct ahash_request *req = dd->req;
1032	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1033	int err;
1034	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1035		!(dd->flags & BIT(FLAGS_HUGE));
1036
1037	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
1038		ctx->total, ctx->digcnt, final);
1039
1040	if (ctx->total < get_block_size(ctx) ||
1041	    ctx->total < dd->fallback_sz)
1042		ctx->flags |= BIT(FLAGS_CPU);
1043
1044	if (ctx->flags & BIT(FLAGS_CPU))
1045		err = omap_sham_xmit_cpu(dd, ctx->total, final);
1046	else
1047		err = omap_sham_xmit_dma(dd, ctx->total, final);
1048
1049	/* wait for DMA completion before we can take more data */
1050	dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);
1051
1052	return err;
1053}
1054
1055static int omap_sham_final_req(struct omap_sham_dev *dd)
1056{
1057	struct ahash_request *req = dd->req;
1058	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1059	int err = 0, use_dma = 1;
1060
1061	if (dd->flags & BIT(FLAGS_HUGE))
1062		return 0;
1063
1064	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
1065		/*
1066		 * It is faster to handle the last block with the CPU, and
1067		 * the CPU must be used when DMA is not present.
1068		 */
1069		use_dma = 0;
1070
1071	if (use_dma)
1072		err = omap_sham_xmit_dma(dd, ctx->total, 1);
1073	else
1074		err = omap_sham_xmit_cpu(dd, ctx->total, 1);
1075
1076	ctx->bufcnt = 0;
1077
1078	dev_dbg(dd->dev, "final_req: err: %d\n", err);
1079
1080	return err;
1081}
1082
1083static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
1084{
1085	struct ahash_request *req = container_of(areq, struct ahash_request,
1086						 base);
1087	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1088	struct omap_sham_dev *dd = ctx->dd;
1089	int err;
1090	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1091			!(dd->flags & BIT(FLAGS_HUGE));
1092
1093	dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
1094		ctx->op, ctx->total, ctx->digcnt, final);
1095
1096	dd->req = req;
1097
1098	err = omap_sham_hw_init(dd);
1099	if (err)
1100		return err;
1101
1102	if (ctx->digcnt)
1103		dd->pdata->copy_hash(req, 0);
1104
1105	if (ctx->op == OP_UPDATE)
1106		err = omap_sham_update_req(dd);
1107	else if (ctx->op == OP_FINAL)
1108		err = omap_sham_final_req(dd);
1109
1110	if (err != -EINPROGRESS)
1111		omap_sham_finish_req(req, err);
1112
1113	return 0;
1114}
1115
1116static int omap_sham_finish_hmac(struct ahash_request *req)
1117{
1118	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1119	struct omap_sham_hmac_ctx *bctx = tctx->base;
1120	int bs = crypto_shash_blocksize(bctx->shash);
1121	int ds = crypto_shash_digestsize(bctx->shash);
1122	SHASH_DESC_ON_STACK(shash, bctx->shash);
1123
1124	shash->tfm = bctx->shash;
1125
1126	return crypto_shash_init(shash) ?:
1127	       crypto_shash_update(shash, bctx->opad, bs) ?:
1128	       crypto_shash_finup(shash, req->result, ds, req->result);
1129}
1130
1131static int omap_sham_finish(struct ahash_request *req)
1132{
1133	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1134	struct omap_sham_dev *dd = ctx->dd;
1135	int err = 0;
1136
1137	if (ctx->digcnt) {
1138		omap_sham_copy_ready_hash(req);
1139		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
1140				!test_bit(FLAGS_AUTO_XOR, &dd->flags))
1141			err = omap_sham_finish_hmac(req);
1142	}
1143
1144	dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt);
1145
1146	return err;
1147}
1148
1149static void omap_sham_finish_req(struct ahash_request *req, int err)
1150{
1151	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1152	struct omap_sham_dev *dd = ctx->dd;
1153
1154	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
1155		free_pages((unsigned long)sg_virt(ctx->sg),
1156			   get_order(ctx->sg->length));
1157
1158	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
1159		kfree(ctx->sg);
1160
1161	ctx->sg = NULL;
1162
1163	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) |
1164		       BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
1165		       BIT(FLAGS_OUTPUT_READY));
1166
1167	if (!err)
1168		dd->pdata->copy_hash(req, 1);
1169
1170	if (dd->flags & BIT(FLAGS_HUGE)) {
1171		/* Re-enqueue the request */
1172		omap_sham_enqueue(req, ctx->op);
1173		return;
1174	}
1175
1176	if (!err) {
1177		if (test_bit(FLAGS_FINAL, &dd->flags))
1178			err = omap_sham_finish(req);
1179	} else {
1180		ctx->flags |= BIT(FLAGS_ERROR);
1181	}
1182
1183	/* atomic operation is not needed here */
1184	dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
1185			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
1186
1187	pm_runtime_mark_last_busy(dd->dev);
1188	pm_runtime_put_autosuspend(dd->dev);
1189
1190	ctx->offset = 0;
1191
1192	crypto_finalize_hash_request(dd->engine, req, err);
1193}
1194
1195static int omap_sham_handle_queue(struct omap_sham_dev *dd,
1196				  struct ahash_request *req)
1197{
1198	return crypto_transfer_hash_request_to_engine(dd->engine, req);
1199}
1200
1201static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1202{
1203	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1204	struct omap_sham_dev *dd = ctx->dd;
1205
1206	ctx->op = op;
1207
1208	return omap_sham_handle_queue(dd, req);
1209}
1210
1211static int omap_sham_update(struct ahash_request *req)
1212{
1213	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1214	struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
1215
1216	if (!req->nbytes)
1217		return 0;
1218
1219	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
1220		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1221					 0, req->nbytes, 0);
1222		ctx->bufcnt += req->nbytes;
1223		return 0;
1224	}
1225
1226	if (dd->polling_mode)
1227		ctx->flags |= BIT(FLAGS_CPU);
1228
1229	return omap_sham_enqueue(req, OP_UPDATE);
1230}
1231
1232static int omap_sham_final_shash(struct ahash_request *req)
1233{
1234	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1235	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1236	int offset = 0;
1237
1238	/*
1239	 * If we are running HMAC on hardware with limited support, skip
1240	 * the ipad at the beginning of the buffer when handing off to
1241	 * the software fallback algorithm.
1242	 */
1243	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
1244	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
1245		offset = get_block_size(ctx);
1246
1247	return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
1248				       ctx->bufcnt - offset, req->result);
1249}
1250
1251static int omap_sham_final(struct ahash_request *req)
1252{
1253	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1254
1255	ctx->flags |= BIT(FLAGS_FINUP);
1256
1257	if (ctx->flags & BIT(FLAGS_ERROR))
1258		return 0; /* uncompleted hash is not needed */
1259
1260	/*
1261	 * The OMAP HW accelerator works only with buffers >= 9 bytes.
1262	 * HMAC input is always >= 9 because the ipad is a full block.
1263	 * If the buffer size is less than fallback_sz, we use the SW
1264	 * fallback instead, as DMA + HW offload provides no benefit
1265	 * for such small inputs.
1266	 */
1267	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
1268		return omap_sham_final_shash(req);
1269	else if (ctx->bufcnt)
1270		return omap_sham_enqueue(req, OP_FINAL);
1271
1272	/* copy ready hash (+ finalize hmac) */
1273	return omap_sham_finish(req);
1274}
1275
1276static int omap_sham_finup(struct ahash_request *req)
1277{
1278	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1279	int err1, err2;
1280
1281	ctx->flags |= BIT(FLAGS_FINUP);
1282
1283	err1 = omap_sham_update(req);
1284	if (err1 == -EINPROGRESS || err1 == -EBUSY)
1285		return err1;
1286	/*
1287	 * final() must always be called to clean up resources,
1288	 * even if update() failed, except when it returned -EINPROGRESS.
1289	 */
1290	err2 = omap_sham_final(req);
1291
1292	return err1 ?: err2;
1293}
1294
1295static int omap_sham_digest(struct ahash_request *req)
1296{
1297	return omap_sham_init(req) ?: omap_sham_finup(req);
1298}
1299
1300static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1301		      unsigned int keylen)
1302{
1303	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1304	struct omap_sham_hmac_ctx *bctx = tctx->base;
1305	int bs = crypto_shash_blocksize(bctx->shash);
1306	int ds = crypto_shash_digestsize(bctx->shash);
1307	int err, i;
1308
1309	err = crypto_shash_setkey(tctx->fallback, key, keylen);
1310	if (err)
1311		return err;
1312
1313	if (keylen > bs) {
1314		err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
1315					      bctx->ipad);
1316		if (err)
1317			return err;
1318		keylen = ds;
1319	} else {
1320		memcpy(bctx->ipad, key, keylen);
1321	}
1322
1323	memset(bctx->ipad + keylen, 0, bs - keylen);
1324
1325	if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
1326		memcpy(bctx->opad, bctx->ipad, bs);
1327
1328		for (i = 0; i < bs; i++) {
1329			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
1330			bctx->opad[i] ^= HMAC_OPAD_VALUE;
1331		}
1332	}
1333
1334	return err;
1335}
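/*
 * Editorial note: on FLAGS_AUTO_XOR hardware (the OMAP4/5 pdata sets
 * this flag) the unmodified key is kept in bctx->ipad and the IP block
 * performs the ipad/opad XOR itself via SHA_REG_MODE_HMAC_KEY_PROC;
 * on older hardware the XOR with HMAC_IPAD_VALUE/HMAC_OPAD_VALUE is
 * precomputed in software above.
 */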
1336
1337static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1338{
1339	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1340	const char *alg_name = crypto_tfm_alg_name(tfm);
1341
1342	/* Allocate a fallback and abort if it failed. */
1343	tctx->fallback = crypto_alloc_shash(alg_name, 0,
1344					    CRYPTO_ALG_NEED_FALLBACK);
1345	if (IS_ERR(tctx->fallback)) {
1346		pr_err("omap-sham: fallback driver '%s' "
1347				"could not be loaded.\n", alg_name);
1348		return PTR_ERR(tctx->fallback);
1349	}
1350
1351	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1352				 sizeof(struct omap_sham_reqctx) + BUFLEN);
1353
1354	if (alg_base) {
1355		struct omap_sham_hmac_ctx *bctx = tctx->base;
1356		tctx->flags |= BIT(FLAGS_HMAC);
1357		bctx->shash = crypto_alloc_shash(alg_base, 0,
1358						CRYPTO_ALG_NEED_FALLBACK);
1359		if (IS_ERR(bctx->shash)) {
1360			pr_err("omap-sham: base driver '%s' "
1361					"could not be loaded.\n", alg_base);
1362			crypto_free_shash(tctx->fallback);
1363			return PTR_ERR(bctx->shash);
1364		}
1365
1366	}
1367
1368	tctx->enginectx.op.do_one_request = omap_sham_hash_one_req;
1369	tctx->enginectx.op.prepare_request = omap_sham_prepare_request;
1370	tctx->enginectx.op.unprepare_request = NULL;
1371
1372	return 0;
1373}
1374
1375static int omap_sham_cra_init(struct crypto_tfm *tfm)
1376{
1377	return omap_sham_cra_init_alg(tfm, NULL);
1378}
1379
1380static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1381{
1382	return omap_sham_cra_init_alg(tfm, "sha1");
1383}
1384
1385static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1386{
1387	return omap_sham_cra_init_alg(tfm, "sha224");
1388}
1389
1390static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1391{
1392	return omap_sham_cra_init_alg(tfm, "sha256");
1393}
1394
1395static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1396{
1397	return omap_sham_cra_init_alg(tfm, "md5");
1398}
1399
1400static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1401{
1402	return omap_sham_cra_init_alg(tfm, "sha384");
1403}
1404
1405static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1406{
1407	return omap_sham_cra_init_alg(tfm, "sha512");
1408}
1409
1410static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1411{
1412	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1413
1414	crypto_free_shash(tctx->fallback);
1415	tctx->fallback = NULL;
1416
1417	if (tctx->flags & BIT(FLAGS_HMAC)) {
1418		struct omap_sham_hmac_ctx *bctx = tctx->base;
1419		crypto_free_shash(bctx->shash);
1420	}
1421}
1422
1423static int omap_sham_export(struct ahash_request *req, void *out)
1424{
1425	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1426
1427	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
1428
1429	return 0;
1430}
1431
1432static int omap_sham_import(struct ahash_request *req, const void *in)
1433{
1434	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1435	const struct omap_sham_reqctx *ctx_in = in;
1436
1437	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
1438
1439	return 0;
1440}
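/*
 * Editorial usage sketch, not part of the driver: saving and restoring
 * an in-progress hash through the export/import hooks above. The state
 * blob is a raw copy of struct omap_sham_reqctx plus the carry buffer,
 * so it must be crypto_ahash_statesize(tfm) bytes (the probe code sets
 * statesize to sizeof(struct omap_sham_reqctx) + BUFLEN).
 * example_save_restore() is a hypothetical helper.
 */
#include <linux/slab.h>

static int example_save_restore(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	void *state;
	int err;

	state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	err = crypto_ahash_export(req, state);		/* snapshot */
	if (!err)
		err = crypto_ahash_import(req, state);	/* restore */

	kfree(state);
	return err;
}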
1441
1442static struct ahash_alg algs_sha1_md5[] = {
1443{
1444	.init		= omap_sham_init,
1445	.update		= omap_sham_update,
1446	.final		= omap_sham_final,
1447	.finup		= omap_sham_finup,
1448	.digest		= omap_sham_digest,
1449	.halg.digestsize	= SHA1_DIGEST_SIZE,
1450	.halg.base	= {
1451		.cra_name		= "sha1",
1452		.cra_driver_name	= "omap-sha1",
1453		.cra_priority		= 400,
1454		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1455						CRYPTO_ALG_ASYNC |
1456						CRYPTO_ALG_NEED_FALLBACK,
1457		.cra_blocksize		= SHA1_BLOCK_SIZE,
1458		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1459		.cra_alignmask		= OMAP_ALIGN_MASK,
1460		.cra_module		= THIS_MODULE,
1461		.cra_init		= omap_sham_cra_init,
1462		.cra_exit		= omap_sham_cra_exit,
1463	}
1464},
1465{
1466	.init		= omap_sham_init,
1467	.update		= omap_sham_update,
1468	.final		= omap_sham_final,
1469	.finup		= omap_sham_finup,
1470	.digest		= omap_sham_digest,
1471	.halg.digestsize	= MD5_DIGEST_SIZE,
1472	.halg.base	= {
1473		.cra_name		= "md5",
1474		.cra_driver_name	= "omap-md5",
1475		.cra_priority		= 400,
1476		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1477						CRYPTO_ALG_ASYNC |
1478						CRYPTO_ALG_NEED_FALLBACK,
1479		.cra_blocksize		= SHA1_BLOCK_SIZE,
1480		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1481		.cra_alignmask		= OMAP_ALIGN_MASK,
1482		.cra_module		= THIS_MODULE,
1483		.cra_init		= omap_sham_cra_init,
1484		.cra_exit		= omap_sham_cra_exit,
1485	}
1486},
1487{
1488	.init		= omap_sham_init,
1489	.update		= omap_sham_update,
1490	.final		= omap_sham_final,
1491	.finup		= omap_sham_finup,
1492	.digest		= omap_sham_digest,
1493	.setkey		= omap_sham_setkey,
1494	.halg.digestsize	= SHA1_DIGEST_SIZE,
1495	.halg.base	= {
1496		.cra_name		= "hmac(sha1)",
1497		.cra_driver_name	= "omap-hmac-sha1",
1498		.cra_priority		= 400,
1499		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1500						CRYPTO_ALG_ASYNC |
1501						CRYPTO_ALG_NEED_FALLBACK,
1502		.cra_blocksize		= SHA1_BLOCK_SIZE,
1503		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1504					sizeof(struct omap_sham_hmac_ctx),
1505		.cra_alignmask		= OMAP_ALIGN_MASK,
1506		.cra_module		= THIS_MODULE,
1507		.cra_init		= omap_sham_cra_sha1_init,
1508		.cra_exit		= omap_sham_cra_exit,
1509	}
1510},
1511{
1512	.init		= omap_sham_init,
1513	.update		= omap_sham_update,
1514	.final		= omap_sham_final,
1515	.finup		= omap_sham_finup,
1516	.digest		= omap_sham_digest,
1517	.setkey		= omap_sham_setkey,
1518	.halg.digestsize	= MD5_DIGEST_SIZE,
1519	.halg.base	= {
1520		.cra_name		= "hmac(md5)",
1521		.cra_driver_name	= "omap-hmac-md5",
1522		.cra_priority		= 400,
1523		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1524						CRYPTO_ALG_ASYNC |
1525						CRYPTO_ALG_NEED_FALLBACK,
1526		.cra_blocksize		= SHA1_BLOCK_SIZE,
1527		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1528					sizeof(struct omap_sham_hmac_ctx),
1529		.cra_alignmask		= OMAP_ALIGN_MASK,
1530		.cra_module		= THIS_MODULE,
1531		.cra_init		= omap_sham_cra_md5_init,
1532		.cra_exit		= omap_sham_cra_exit,
1533	}
1534}
1535};
1536
1537/* OMAP4 has some algs in addition to what OMAP2 has */
1538static struct ahash_alg algs_sha224_sha256[] = {
1539{
1540	.init		= omap_sham_init,
1541	.update		= omap_sham_update,
1542	.final		= omap_sham_final,
1543	.finup		= omap_sham_finup,
1544	.digest		= omap_sham_digest,
1545	.halg.digestsize	= SHA224_DIGEST_SIZE,
1546	.halg.base	= {
1547		.cra_name		= "sha224",
1548		.cra_driver_name	= "omap-sha224",
1549		.cra_priority		= 400,
1550		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1551						CRYPTO_ALG_ASYNC |
1552						CRYPTO_ALG_NEED_FALLBACK,
1553		.cra_blocksize		= SHA224_BLOCK_SIZE,
1554		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1555		.cra_alignmask		= OMAP_ALIGN_MASK,
1556		.cra_module		= THIS_MODULE,
1557		.cra_init		= omap_sham_cra_init,
1558		.cra_exit		= omap_sham_cra_exit,
1559	}
1560},
1561{
1562	.init		= omap_sham_init,
1563	.update		= omap_sham_update,
1564	.final		= omap_sham_final,
1565	.finup		= omap_sham_finup,
1566	.digest		= omap_sham_digest,
1567	.halg.digestsize	= SHA256_DIGEST_SIZE,
1568	.halg.base	= {
1569		.cra_name		= "sha256",
1570		.cra_driver_name	= "omap-sha256",
1571		.cra_priority		= 400,
1572		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1573						CRYPTO_ALG_ASYNC |
1574						CRYPTO_ALG_NEED_FALLBACK,
1575		.cra_blocksize		= SHA256_BLOCK_SIZE,
1576		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1577		.cra_alignmask		= OMAP_ALIGN_MASK,
1578		.cra_module		= THIS_MODULE,
1579		.cra_init		= omap_sham_cra_init,
1580		.cra_exit		= omap_sham_cra_exit,
1581	}
1582},
1583{
1584	.init		= omap_sham_init,
1585	.update		= omap_sham_update,
1586	.final		= omap_sham_final,
1587	.finup		= omap_sham_finup,
1588	.digest		= omap_sham_digest,
1589	.setkey		= omap_sham_setkey,
1590	.halg.digestsize	= SHA224_DIGEST_SIZE,
1591	.halg.base	= {
1592		.cra_name		= "hmac(sha224)",
1593		.cra_driver_name	= "omap-hmac-sha224",
1594		.cra_priority		= 400,
1595		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1596						CRYPTO_ALG_ASYNC |
1597						CRYPTO_ALG_NEED_FALLBACK,
1598		.cra_blocksize		= SHA224_BLOCK_SIZE,
1599		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1600					sizeof(struct omap_sham_hmac_ctx),
1601		.cra_alignmask		= OMAP_ALIGN_MASK,
1602		.cra_module		= THIS_MODULE,
1603		.cra_init		= omap_sham_cra_sha224_init,
1604		.cra_exit		= omap_sham_cra_exit,
1605	}
1606},
1607{
1608	.init		= omap_sham_init,
1609	.update		= omap_sham_update,
1610	.final		= omap_sham_final,
1611	.finup		= omap_sham_finup,
1612	.digest		= omap_sham_digest,
1613	.setkey		= omap_sham_setkey,
1614	.halg.digestsize	= SHA256_DIGEST_SIZE,
1615	.halg.base	= {
1616		.cra_name		= "hmac(sha256)",
1617		.cra_driver_name	= "omap-hmac-sha256",
1618		.cra_priority		= 400,
1619		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1620						CRYPTO_ALG_ASYNC |
1621						CRYPTO_ALG_NEED_FALLBACK,
1622		.cra_blocksize		= SHA256_BLOCK_SIZE,
1623		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1624					sizeof(struct omap_sham_hmac_ctx),
1625		.cra_alignmask		= OMAP_ALIGN_MASK,
1626		.cra_module		= THIS_MODULE,
1627		.cra_init		= omap_sham_cra_sha256_init,
1628		.cra_exit		= omap_sham_cra_exit,
1629	}
1630},
1631};

static struct ahash_alg algs_sha384_sha512[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha384",
		.cra_driver_name	= "omap-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha512",
		.cra_driver_name	= "omap-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha384)",
		.cra_driver_name	= "omap-hmac-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha384_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha512)",
		.cra_driver_name	= "omap-hmac-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha512_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};

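/*
 * Bottom half: runs as a tasklet once an interrupt (or the DMA
 * callback) has signalled completion.  The CPU/PIO path only waits for
 * FLAGS_OUTPUT_READY; the DMA path additionally tears the transfer
 * down before completing the request via omap_sham_finish_req().
 */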
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);
}

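/*
 * Interrupt handlers: both SoC variants funnel into the common helper,
 * which marks the output ready and defers the real work to the
 * done_task tasklet.  They differ only in how the interrupt is
 * acknowledged: OMAP2 writes the CTRL register back, OMAP4+ masks
 * IT_EN.
 */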
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);

	return IRQ_HANDLED;
}

static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
				 SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	return omap_sham_irq_common(dd);
}

static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);

	return omap_sham_irq_common(dd);
}

static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
};

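/*
 * Per-SoC platform data: register offsets and IP-specific hooks live
 * here so the common code never hard-codes a register layout.  The
 * variant is chosen from the OF match data below; legacy (non-DT)
 * probing always uses the OMAP2 layout.
 */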
static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
	.algs_info	= omap_sham_algs_info_omap2,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
	.flags		= BIT(FLAGS_BE32_SHA1),
	.digest_size	= SHA1_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap2,
	.write_ctrl	= omap_sham_write_ctrl_omap2,
	.trigger	= omap_sham_trigger_omap2,
	.poll_irq	= omap_sham_poll_irq_omap2,
	.intr_hdlr	= omap_sham_irq_omap2,
	.idigest_ofs	= 0x00,
	.din_ofs	= 0x1c,
	.digcnt_ofs	= 0x14,
	.rev_ofs	= 0x5c,
	.mask_ofs	= 0x60,
	.sysstatus_ofs	= 0x64,
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};

#ifdef CONFIG_OF
static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};

static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
	.algs_info	= omap_sham_algs_info_omap4,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA256_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x020,
	.odigest_ofs	= 0x0,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x040,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x44,
	.length_ofs	= 0x48,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
	{
		.algs_list	= algs_sha384_sha512,
		.size		= ARRAY_SIZE(algs_sha384_sha512),
	},
};
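
/*
 * Only the OMAP5 list above includes algs_sha384_sha512, so SHA-384/512
 * and their HMAC variants are registered only on IP revisions that
 * implement them.
 */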

static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
	.algs_info	= omap_sham_algs_info_omap5,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA512_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x240,
	.odigest_ofs	= 0x200,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x280,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x284,
	.length_ofs	= 0x288,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};

static const struct of_device_id omap_sham_of_match[] = {
	{
		.compatible	= "ti,omap2-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap4-sham",
		.data		= &omap_sham_pdata_omap4,
	},
	{
		.compatible	= "ti,omap5-sham",
		.data		= &omap_sham_pdata_omap5,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);

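/*
 * Resource discovery.  In the DT case the node supplies the register
 * window, interrupt and DMA request; a node might look like this
 * (hypothetical unit address and specifier values, for illustration
 * only):
 *
 *	sham: sham@0 {
 *		compatible = "ti,omap4-sham";
 *		reg = <0x0 0x300>;
 *		interrupts = <0>;
 *		dmas = <&sdma 0>;
 *		dma-names = "rx";
 *	};
 *
 * The "rx" channel name matches the dma_request_chan() call in
 * omap_sham_probe() below.
 */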
static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	int err = 0;

	dd->pdata = of_device_get_match_data(dev);
	if (!dd->pdata) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->irq = irq_of_parse_and_map(node, 0);
	if (!dd->irq) {
		dev_err(dev, "can't translate OF irq value\n");
		err = -EINVAL;
		goto err;
	}

err:
	return err;
}
#else
static const struct of_device_id omap_sham_of_match[] = {
	{},
};

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif

static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		err = dd->irq;
		goto err;
	}

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_sham_pdata_omap2;

err:
	return err;
}

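/*
 * sysfs knobs: "fallback" is the size threshold (in bytes) below which
 * the driver hands a request to the software fallback rather than the
 * HW engine (it defaults to OMAP_SHA_DMA_THRESHOLD in probe), and
 * "queue_len" bounds the internal request queue.
 */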
static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->fallback_sz);
}

static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	/* HW accelerator only works with buffers of at least 9 bytes */
	if (value < 9) {
		dev_err(dev, "minimum fallback size 9\n");
		return -EINVAL;
	}

	dd->fallback_sz = value;

	return size;
}

static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->queue.max_qlen);
}

static ssize_t queue_len_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	if (value < 1)
		return -EINVAL;

	/*
	 * Changing the queue size on the fly is safe: if the new limit is
	 * smaller than the current fill level, the queue simply refuses new
	 * entries until it has shrunk enough.
	 */
	dd->queue.max_qlen = value;

	return size;
}

static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_sham_attrs[] = {
	&dev_attr_queue_len.attr,
	&dev_attr_fallback.attr,
	NULL,
};

static struct attribute_group omap_sham_attr_group = {
	.attrs = omap_sham_attrs,
};
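
/*
 * Example usage from userspace (hypothetical sysfs path; the exact
 * parent directory depends on how the platform device is named):
 *
 *	# hand buffers shorter than 256 bytes to the software fallback
 *	echo 256 > /sys/devices/.../fallback
 *
 *	# allow up to 32 queued requests
 *	echo 32 > /sys/devices/.../queue_len
 */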

static int omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource res;
	dma_cap_mask_t mask;
	int err, i, j;
	u32 rev;

	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto data_err;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto data_err;
	}
	dd->phys_base = res.start;

	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq %d, err = %d\n",
			dd->irq, err);
		goto data_err;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch = dma_request_chan(dev, "rx");
	if (IS_ERR(dd->dma_lch)) {
		err = PTR_ERR(dd->dma_lch);
		if (err == -EPROBE_DEFER)
			goto data_err;

		dd->polling_mode = 1;
		dev_dbg(dev, "using polling mode instead of dma\n");
	}

	dd->flags |= dd->pdata->flags;
	sham.flags |= dd->pdata->flags;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		goto err_pm;
	}
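
	/*
	 * The IP must be runtime-resumed (clocked) while the revision
	 * register is read; the reference is dropped again immediately
	 * afterwards.
	 */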
	rev = omap_sham_read(dd, SHA_REG_REV(dd));
	pm_runtime_put_sync(&pdev->dev);

	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
		(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	spin_lock_bh(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock_bh(&sham.lock);

	dd->engine = crypto_engine_alloc_init(dev, 1);
	if (!dd->engine) {
		err = -ENOMEM;
		goto err_engine;
	}

	err = crypto_engine_start(dd->engine);
	if (err)
		goto err_engine_start;

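	/*
	 * Register the hash algorithms supported by this IP revision.
	 * export/import and statesize are filled in at runtime; the
	 * per-list "registered" count doubles as the unwind cursor for
	 * the err_algs path.
	 */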
	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		if (dd->pdata->algs_info[i].registered)
			break;

		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			struct ahash_alg *alg;

			alg = &dd->pdata->algs_info[i].algs_list[j];
			alg->export = omap_sham_export;
			alg->import = omap_sham_import;
			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
					      BUFLEN;
			err = crypto_register_ahash(alg);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
	if (err) {
		dev_err(dev, "could not create sysfs device attrs\n");
		goto err_algs;
	}

	return 0;

err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
err_engine_start:
	crypto_engine_exit(dd->engine);
err_engine:
	spin_lock_bh(&sham.lock);
	list_del(&dd->list);
	spin_unlock_bh(&sham.lock);
err_pm:
	pm_runtime_disable(dev);
	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}

static int omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i, j;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock_bh(&sham.lock);
	list_del(&dd->list);
	spin_unlock_bh(&sham.lock);
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
			dd->pdata->algs_info[i].registered--;
		}
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(&pdev->dev);

	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);

	sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);

	return 0;
}

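/*
 * System sleep hooks: suspend releases a runtime PM reference and
 * resume takes a matching one, so the two calls balance each other
 * across a suspend/resume cycle.
 */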
#ifdef CONFIG_PM_SLEEP
static int omap_sham_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_sham_resume(struct device *dev)
{
	int err = pm_runtime_resume_and_get(dev);

	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		return err;
	}
	return 0;
}
#endif

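/*
 * SIMPLE_DEV_PM_OPS only wires up the system sleep callbacks; runtime
 * PM is driven explicitly through the pm_runtime_* calls elsewhere in
 * the driver.
 */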
static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);

static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.pm	= &omap_sham_pm_ops,
		.of_match_table	= omap_sham_of_match,
	},
};

module_platform_driver(omap_sham_driver);

MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
MODULE_ALIAS("platform:omap-sham");