   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for OMAP SHA1/MD5 HW acceleration.
   6 *
   7 * Copyright (c) 2010 Nokia Corporation
   8 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
   9 * Copyright (c) 2011 Texas Instruments Incorporated
  10 *
  11 * Some ideas are from old omap-sha1-md5.c driver.
  12 */
  13
  14#define pr_fmt(fmt) "%s: " fmt, __func__
  15
  16#include <crypto/engine.h>
  17#include <crypto/hmac.h>
  18#include <crypto/internal/hash.h>
  19#include <crypto/scatterwalk.h>
  20#include <crypto/sha1.h>
  21#include <crypto/sha2.h>
  22#include <linux/err.h>
  23#include <linux/device.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/dmaengine.h>
  26#include <linux/init.h>
  27#include <linux/interrupt.h>
  28#include <linux/io.h>
  29#include <linux/irq.h>
  30#include <linux/kernel.h>
  31#include <linux/module.h>
  32#include <linux/of.h>
  33#include <linux/of_address.h>
  34#include <linux/of_irq.h>
  35#include <linux/platform_device.h>
  36#include <linux/pm_runtime.h>
  37#include <linux/scatterlist.h>
  38#include <linux/slab.h>
  39#include <linux/string.h>
  40
  41#define MD5_DIGEST_SIZE			16
  42
  43#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x)*0x04))
  44#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
  45#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)
  46
  47#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + ((x) * 0x04))
  48
  49#define SHA_REG_CTRL			0x18
  50#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
  51#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
  52#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
  53#define SHA_REG_CTRL_ALGO		(1 << 2)
  54#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
  55#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)
  56
  57#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)
  58
  59#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
  60#define SHA_REG_MASK_DMA_EN		(1 << 3)
  61#define SHA_REG_MASK_IT_EN		(1 << 2)
  62#define SHA_REG_MASK_SOFTRESET		(1 << 1)
  63#define SHA_REG_AUTOIDLE		(1 << 0)
  64
  65#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
  66#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)
  67
  68#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
  69#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
  70#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
  71#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
  72#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)
  73
  74#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
  75#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
  76#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
  77#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
  78#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
  79#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
  80#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)
  81
  82#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)
  83
  84#define SHA_REG_IRQSTATUS		0x118
  85#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
  86#define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2)
  87#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
  88#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)
  89
  90#define SHA_REG_IRQENA			0x11C
  91#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
  92#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
  93#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
  94#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)
  95
  96#define DEFAULT_TIMEOUT_INTERVAL	HZ
  97
  98#define DEFAULT_AUTOSUSPEND_DELAY	1000
  99
 100/* mostly device flags */
 101#define FLAGS_FINAL		1
 102#define FLAGS_DMA_ACTIVE	2
 103#define FLAGS_OUTPUT_READY	3
 104#define FLAGS_CPU		5
 105#define FLAGS_DMA_READY		6
 106#define FLAGS_AUTO_XOR		7
 107#define FLAGS_BE32_SHA1		8
 108#define FLAGS_SGS_COPIED	9
 109#define FLAGS_SGS_ALLOCED	10
 110#define FLAGS_HUGE		11
 111
 112/* context flags */
 113#define FLAGS_FINUP		16
 114
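/*
 * The FLAGS_MODE_* values mirror the algorithm field of SHA_REG_MODE,
 * shifted up into the per-request flags so the OMAP4 write_ctrl path
 * can shift them straight back down when programming the MODE register.
 */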
 115#define FLAGS_MODE_SHIFT	18
 116#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK	<< FLAGS_MODE_SHIFT)
 117#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
 118#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
 119#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
 120#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
 121#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
 122#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)
 123
 124#define FLAGS_HMAC		21
 125#define FLAGS_ERROR		22
 126
 127#define OP_UPDATE		1
 128#define OP_FINAL		2
 129
 130#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
 131#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
 132
 133#define BUFLEN			SHA512_BLOCK_SIZE
 134#define OMAP_SHA_DMA_THRESHOLD	256
 135
 136#define OMAP_SHA_MAX_DMA_LEN	(1024 * 2048)
 137
 138struct omap_sham_dev;
 139
 140struct omap_sham_reqctx {
 141	struct omap_sham_dev	*dd;
 142	unsigned long		flags;
 143	u8			op;
 144
 145	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
 146	size_t			digcnt;
 147	size_t			bufcnt;
 148	size_t			buflen;
 149
 150	/* walk state */
 151	struct scatterlist	*sg;
 152	struct scatterlist	sgl[2];
 153	int			offset;	/* offset in current sg */
 154	int			sg_len;
 155	unsigned int		total;	/* total request */
 156
 157	u8			buffer[] OMAP_ALIGNED;
 158};
 159
 160struct omap_sham_hmac_ctx {
 161	struct crypto_shash	*shash;
 162	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
 163	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
 164};
 165
 166struct omap_sham_ctx {
 167	unsigned long		flags;
 168
 169	/* fallback stuff */
 170	struct crypto_shash	*fallback;
 171
 172	struct omap_sham_hmac_ctx base[];
 173};
 174
 175#define OMAP_SHAM_QUEUE_LENGTH	10
 176
 177struct omap_sham_algs_info {
 178	struct ahash_engine_alg	*algs_list;
 179	unsigned int		size;
 180	unsigned int		registered;
 181};
 182
 183struct omap_sham_pdata {
 184	struct omap_sham_algs_info	*algs_info;
 185	unsigned int	algs_info_size;
 186	unsigned long	flags;
 187	int		digest_size;
 188
 189	void		(*copy_hash)(struct ahash_request *req, int out);
 190	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
 191				      int final, int dma);
 192	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
 193	int		(*poll_irq)(struct omap_sham_dev *dd);
 194	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);
 195
 196	u32		odigest_ofs;
 197	u32		idigest_ofs;
 198	u32		din_ofs;
 199	u32		digcnt_ofs;
 200	u32		rev_ofs;
 201	u32		mask_ofs;
 202	u32		sysstatus_ofs;
 203	u32		mode_ofs;
 204	u32		length_ofs;
 205
 206	u32		major_mask;
 207	u32		major_shift;
 208	u32		minor_mask;
 209	u32		minor_shift;
 210};
 211
 212struct omap_sham_dev {
 213	struct list_head	list;
 214	unsigned long		phys_base;
 215	struct device		*dev;
 216	void __iomem		*io_base;
 217	int			irq;
 218	int			err;
 219	struct dma_chan		*dma_lch;
 220	struct tasklet_struct	done_task;
 221	u8			polling_mode;
 222	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;
 223
 224	unsigned long		flags;
 225	int			fallback_sz;
 226	struct crypto_queue	queue;
 227	struct ahash_request	*req;
 228	struct crypto_engine	*engine;
 229
 230	const struct omap_sham_pdata	*pdata;
 231};
 232
 233struct omap_sham_drv {
 234	struct list_head	dev_list;
 235	spinlock_t		lock;
 236	unsigned long		flags;
 237};
 238
 239static struct omap_sham_drv sham = {
 240	.dev_list = LIST_HEAD_INIT(sham.dev_list),
 241	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
 242};
 243
 244static int omap_sham_enqueue(struct ahash_request *req, unsigned int op);
 245static void omap_sham_finish_req(struct ahash_request *req, int err);
 246
 247static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
 248{
 249	return __raw_readl(dd->io_base + offset);
 250}
 251
 252static inline void omap_sham_write(struct omap_sham_dev *dd,
 253					u32 offset, u32 value)
 254{
 255	__raw_writel(value, dd->io_base + offset);
 256}
 257
 258static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
 259					u32 value, u32 mask)
 260{
 261	u32 val;
 262
 263	val = omap_sham_read(dd, address);
 264	val &= ~mask;
 265	val |= value;
 266	omap_sham_write(dd, address, val);
 267}
 268
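/*
 * Busy-wait until the given status bit at @offset is set; give up with
 * -ETIMEDOUT after DEFAULT_TIMEOUT_INTERVAL.
 */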
 269static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
 270{
 271	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
 272
 273	while (!(omap_sham_read(dd, offset) & bit)) {
 274		if (time_is_before_jiffies(timeout))
 275			return -ETIMEDOUT;
 276	}
 277
 278	return 0;
 279}
 280
 281static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
 282{
 283	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 284	struct omap_sham_dev *dd = ctx->dd;
 285	u32 *hash = (u32 *)ctx->digest;
 286	int i;
 287
 288	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
 289		if (out)
 290			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
 291		else
 292			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
 293	}
 294}
 295
 296static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
 297{
 298	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 299	struct omap_sham_dev *dd = ctx->dd;
 300	int i;
 301
 302	if (ctx->flags & BIT(FLAGS_HMAC)) {
 303		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
 304		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 305		struct omap_sham_hmac_ctx *bctx = tctx->base;
 306		u32 *opad = (u32 *)bctx->opad;
 307
 308		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
 309			if (out)
 310				opad[i] = omap_sham_read(dd,
 311						SHA_REG_ODIGEST(dd, i));
 312			else
 313				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
 314						opad[i]);
 315		}
 316	}
 317
 318	omap_sham_copy_hash_omap2(req, out);
 319}
 320
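/*
 * Copy the completed digest from the request context to req->result,
 * fixing up word order: OMAP2 stores SHA1 words big-endian
 * (FLAGS_BE32_SHA1), everything else is little-endian.
 */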
 321static void omap_sham_copy_ready_hash(struct ahash_request *req)
 322{
 323	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 324	u32 *in = (u32 *)ctx->digest;
 325	u32 *hash = (u32 *)req->result;
 326	int i, d, big_endian = 0;
 327
 328	if (!hash)
 329		return;
 330
 331	switch (ctx->flags & FLAGS_MODE_MASK) {
 332	case FLAGS_MODE_MD5:
 333		d = MD5_DIGEST_SIZE / sizeof(u32);
 334		break;
 335	case FLAGS_MODE_SHA1:
 336		/* OMAP2 SHA1 is big endian */
 337		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
 338			big_endian = 1;
 339		d = SHA1_DIGEST_SIZE / sizeof(u32);
 340		break;
 341	case FLAGS_MODE_SHA224:
 342		d = SHA224_DIGEST_SIZE / sizeof(u32);
 343		break;
 344	case FLAGS_MODE_SHA256:
 345		d = SHA256_DIGEST_SIZE / sizeof(u32);
 346		break;
 347	case FLAGS_MODE_SHA384:
 348		d = SHA384_DIGEST_SIZE / sizeof(u32);
 349		break;
 350	case FLAGS_MODE_SHA512:
 351		d = SHA512_DIGEST_SIZE / sizeof(u32);
 352		break;
 353	default:
 354		d = 0;
 355	}
 356
 357	if (big_endian)
 358		for (i = 0; i < d; i++)
 359			put_unaligned(be32_to_cpup((__be32 *)in + i), &hash[i]);
 360	else
 361		for (i = 0; i < d; i++)
 362			put_unaligned(le32_to_cpup((__le32 *)in + i), &hash[i]);
 363}
 364
 365static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
 366				 int final, int dma)
 367{
 368	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 369	u32 val = length << 5, mask;
 370
 371	if (likely(ctx->digcnt))
 372		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
 373
 374	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
 375		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
 376		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
 377	/*
 378	 * Setting ALGO_CONST only for the first iteration
 379	 * and CLOSE_HASH only for the last one.
 380	 */
 381	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
 382		val |= SHA_REG_CTRL_ALGO;
 383	if (!ctx->digcnt)
 384		val |= SHA_REG_CTRL_ALGO_CONST;
 385	if (final)
 386		val |= SHA_REG_CTRL_CLOSE_HASH;
 387
 388	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
 389			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 390
 391	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
 392}
 393
 394static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
 395{
 396}
 397
 398static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
 399{
 400	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
 401}
 402
 403static int get_block_size(struct omap_sham_reqctx *ctx)
 404{
 405	int d;
 406
 407	switch (ctx->flags & FLAGS_MODE_MASK) {
 408	case FLAGS_MODE_MD5:
 409	case FLAGS_MODE_SHA1:
 410		d = SHA1_BLOCK_SIZE;
 411		break;
 412	case FLAGS_MODE_SHA224:
 413	case FLAGS_MODE_SHA256:
 414		d = SHA256_BLOCK_SIZE;
 415		break;
 416	case FLAGS_MODE_SHA384:
 417	case FLAGS_MODE_SHA512:
 418		d = SHA512_BLOCK_SIZE;
 419		break;
 420	default:
 421		d = 0;
 422	}
 423
 424	return d;
 425}
 426
 427static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
 428				    u32 *value, int count)
 429{
 430	for (; count--; value++, offset += 4)
 431		omap_sham_write(dd, offset, *value);
 432}
 433
 434static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
 435				 int final, int dma)
 436{
 437	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 438	u32 val, mask;
 439
 440	if (likely(ctx->digcnt))
 441		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);
 442
 443	/*
 444	 * Setting ALGO_CONST only for the first iteration and
 445	 * CLOSE_HASH only for the last one. Note that flags mode bits
 446	 * correspond to algorithm encoding in mode register.
 447	 */
 448	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
 449	if (!ctx->digcnt) {
 450		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
 451		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 452		struct omap_sham_hmac_ctx *bctx = tctx->base;
 453		int bs, nr_dr;
 454
 455		val |= SHA_REG_MODE_ALGO_CONSTANT;
 456
 457		if (ctx->flags & BIT(FLAGS_HMAC)) {
 458			bs = get_block_size(ctx);
 459			nr_dr = bs / (2 * sizeof(u32));
 460			val |= SHA_REG_MODE_HMAC_KEY_PROC;
 461			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
 462					  (u32 *)bctx->ipad, nr_dr);
 463			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
 464					  (u32 *)bctx->ipad + nr_dr, nr_dr);
 465			ctx->digcnt += bs;
 466		}
 467	}
 468
 469	if (final) {
 470		val |= SHA_REG_MODE_CLOSE_HASH;
 471
 472		if (ctx->flags & BIT(FLAGS_HMAC))
 473			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
 474	}
 475
 476	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
 477	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
 478	       SHA_REG_MODE_HMAC_KEY_PROC;
 479
 480	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
 481	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
 482	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
 483	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
 484			     SHA_REG_MASK_IT_EN |
 485				     (dma ? SHA_REG_MASK_DMA_EN : 0),
 486			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
 487}
 488
 489static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
 490{
 491	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
 492}
 493
 494static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
 495{
 496	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
 497			      SHA_REG_IRQSTATUS_INPUT_RDY);
 498}
 499
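/*
 * PIO path: program the controller, then feed the scatterlist into the
 * DIN registers one block at a time, polling for input-ready before
 * each block.
 */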
 500static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
 501			      int final)
 502{
 503	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 504	int count, len32, bs32, offset = 0;
 505	const u32 *buffer;
 506	int mlen;
 507	struct sg_mapping_iter mi;
 508
 509	dev_dbg(dd->dev, "xmit_cpu: digcnt: %zd, length: %zd, final: %d\n",
 510						ctx->digcnt, length, final);
 511
 512	dd->pdata->write_ctrl(dd, length, final, 0);
 513	dd->pdata->trigger(dd, length);
 514
 515	/* digcnt should be non-zero before the next lines so clocks can be disabled later */
 516	ctx->digcnt += length;
 517	ctx->total -= length;
 518
 519	if (final)
 520		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 521
 522	set_bit(FLAGS_CPU, &dd->flags);
 523
 524	len32 = DIV_ROUND_UP(length, sizeof(u32));
 525	bs32 = get_block_size(ctx) / sizeof(u32);
 526
 527	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
 528		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
 529
 530	mlen = 0;
 531
 532	while (len32) {
 533		if (dd->pdata->poll_irq(dd))
 534			return -ETIMEDOUT;
 535
 536		for (count = 0; count < min(len32, bs32); count++, offset++) {
 537			if (!mlen) {
 538				sg_miter_next(&mi);
 539				mlen = mi.length;
 540				if (!mlen) {
 541					pr_err("sg miter failure.\n");
 542					return -EINVAL;
 543				}
 544				offset = 0;
 545				buffer = mi.addr;
 546			}
 547			omap_sham_write(dd, SHA_REG_DIN(dd, count),
 548					buffer[offset]);
 549			mlen -= 4;
 550		}
 551		len32 -= min(len32, bs32);
 552	}
 553
 554	sg_miter_stop(&mi);
 555
 556	return -EINPROGRESS;
 557}
 558
 559static void omap_sham_dma_callback(void *param)
 560{
 561	struct omap_sham_dev *dd = param;
 562
 563	set_bit(FLAGS_DMA_READY, &dd->flags);
 564	tasklet_schedule(&dd->done_task);
 565}
 566
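/*
 * DMA path: map the scatterlist, set up a slave transfer towards the
 * DIN registers and let the completion callback schedule the done
 * tasklet.
 */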
 567static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
 568			      int final)
 569{
 570	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 571	struct dma_async_tx_descriptor *tx;
 572	struct dma_slave_config cfg;
 573	int ret;
 574
 575	dev_dbg(dd->dev, "xmit_dma: digcnt: %zd, length: %zd, final: %d\n",
 576						ctx->digcnt, length, final);
 577
 578	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
 579		dev_err(dd->dev, "dma_map_sg error\n");
 580		return -EINVAL;
 581	}
 582
 583	memset(&cfg, 0, sizeof(cfg));
 584
 585	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
 586	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 587	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;
 588
 589	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
 590	if (ret) {
 591		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
 592		return ret;
 593	}
 594
 595	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
 596				     DMA_MEM_TO_DEV,
 597				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 598
 599	if (!tx) {
 600		dev_err(dd->dev, "prep_slave_sg failed\n");
 601		return -EINVAL;
 602	}
 603
 604	tx->callback = omap_sham_dma_callback;
 605	tx->callback_param = dd;
 606
 607	dd->pdata->write_ctrl(dd, length, final, 1);
 608
 609	ctx->digcnt += length;
 610	ctx->total -= length;
 611
 612	if (final)
 613		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 614
 615	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 616
 617	dmaengine_submit(tx);
 618	dma_async_issue_pending(dd->dma_lch);
 619
 620	dd->pdata->trigger(dd, length);
 621
 622	return -EINPROGRESS;
 623}
 624
 625static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
 626				   struct scatterlist *sg, int bs, int new_len)
 627{
 628	int n = sg_nents(sg);
 629	struct scatterlist *tmp;
 630	int offset = ctx->offset;
 631
 632	ctx->total = new_len;
 633
 634	if (ctx->bufcnt)
 635		n++;
 636
 637	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
 638	if (!ctx->sg)
 639		return -ENOMEM;
 640
 641	sg_init_table(ctx->sg, n);
 642
 643	tmp = ctx->sg;
 644
 645	ctx->sg_len = 0;
 646
 647	if (ctx->bufcnt) {
 648		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
 649		tmp = sg_next(tmp);
 650		ctx->sg_len++;
 651		new_len -= ctx->bufcnt;
 652	}
 653
 654	while (sg && new_len) {
 655		int len = sg->length - offset;
 656
 657		if (len <= 0) {
 658			offset -= sg->length;
 659			sg = sg_next(sg);
 660			continue;
 661		}
 662
 663		if (new_len < len)
 664			len = new_len;
 665
 666		if (len > 0) {
 667			new_len -= len;
 668			sg_set_page(tmp, sg_page(sg), len, sg->offset + offset);
 669			offset = 0;
 670			ctx->offset = 0;
 671			ctx->sg_len++;
 672			if (new_len <= 0)
 673				break;
 674			tmp = sg_next(tmp);
 675		}
 676
 677		sg = sg_next(sg);
 678	}
 679
 680	if (tmp)
 681		sg_mark_end(tmp);
 682
 683	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);
 684
 685	ctx->offset += new_len - ctx->bufcnt;
 686	ctx->bufcnt = 0;
 687
 688	return 0;
 689}
 690
 691static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
 692			      struct scatterlist *sg, int bs,
 693			      unsigned int new_len)
 694{
 695	int pages;
 696	void *buf;
 697
 698	pages = get_order(new_len);
 699
 700	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
 701	if (!buf) {
 702		pr_err("Couldn't allocate pages for unaligned cases.\n");
 703		return -ENOMEM;
 704	}
 705
 706	if (ctx->bufcnt)
 707		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);
 708
 709	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
 710				 min(new_len, ctx->total) - ctx->bufcnt, 0);
 711	sg_init_table(ctx->sgl, 1);
 712	sg_set_buf(ctx->sgl, buf, new_len);
 713	ctx->sg = ctx->sgl;
 714	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);
 715	ctx->sg_len = 1;
 716	ctx->offset += new_len - ctx->bufcnt;
 717	ctx->bufcnt = 0;
 718	ctx->total = new_len;
 719
 720	return 0;
 721}
 722
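/*
 * Decide how the input scatterlist can be handed to the hardware: use
 * it as-is when every segment is word aligned and a multiple of the
 * block size, rebuild just the list (omap_sham_copy_sg_lists) when only
 * the layout is unsuitable, or copy the data into a bounce buffer
 * (omap_sham_copy_sgs) when the data itself is unaligned or longer than
 * OMAP_SHA_MAX_DMA_LEN.
 */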
 723static int omap_sham_align_sgs(struct scatterlist *sg,
 724			       int nbytes, int bs, bool final,
 725			       struct omap_sham_reqctx *rctx)
 726{
 727	int n = 0;
 728	bool aligned = true;
 729	bool list_ok = true;
 730	struct scatterlist *sg_tmp = sg;
 731	int new_len;
 732	int offset = rctx->offset;
 733	int bufcnt = rctx->bufcnt;
 734
 735	if (!sg || !sg->length || !nbytes) {
 736		if (bufcnt) {
 737			bufcnt = DIV_ROUND_UP(bufcnt, bs) * bs;
 738			sg_init_table(rctx->sgl, 1);
 739			sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, bufcnt);
 740			rctx->sg = rctx->sgl;
 741			rctx->sg_len = 1;
 742		}
 743
 744		return 0;
 745	}
 746
 747	new_len = nbytes;
 748
 749	if (offset)
 750		list_ok = false;
 751
 752	if (final)
 753		new_len = DIV_ROUND_UP(new_len, bs) * bs;
 754	else
 755		new_len = (new_len - 1) / bs * bs;
 756
 757	if (!new_len)
 758		return 0;
 759
 760	if (nbytes != new_len)
 761		list_ok = false;
 762
 763	while (nbytes > 0 && sg_tmp) {
 764		n++;
 765
 766		if (bufcnt) {
 767			if (!IS_ALIGNED(bufcnt, bs)) {
 768				aligned = false;
 769				break;
 770			}
 771			nbytes -= bufcnt;
 772			bufcnt = 0;
 773			if (!nbytes)
 774				list_ok = false;
 775
 776			continue;
 777		}
 778
 779#ifdef CONFIG_ZONE_DMA
 780		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
 781			aligned = false;
 782			break;
 783		}
 784#endif
 785
 786		if (offset < sg_tmp->length) {
 787			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
 788				aligned = false;
 789				break;
 790			}
 791
 792			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
 793				aligned = false;
 794				break;
 795			}
 796		}
 797
 798		if (offset) {
 799			offset -= sg_tmp->length;
 800			if (offset < 0) {
 801				nbytes += offset;
 802				offset = 0;
 803			}
 804		} else {
 805			nbytes -= sg_tmp->length;
 806		}
 807
 808		sg_tmp = sg_next(sg_tmp);
 809
 810		if (nbytes < 0) {
 811			list_ok = false;
 812			break;
 813		}
 814	}
 815
 816	if (new_len > OMAP_SHA_MAX_DMA_LEN) {
 817		new_len = OMAP_SHA_MAX_DMA_LEN;
 818		aligned = false;
 819	}
 820
 821	if (!aligned)
 822		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
 823	else if (!list_ok)
 824		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);
 825
 826	rctx->total = new_len;
 827	rctx->offset += new_len;
 828	rctx->sg_len = n;
 829	if (rctx->bufcnt) {
 830		sg_init_table(rctx->sgl, 2);
 831		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);
 832		sg_chain(rctx->sgl, 2, sg);
 833		rctx->sg = rctx->sgl;
 834	} else {
 835		rctx->sg = sg;
 836	}
 837
 838	return 0;
 839}
 840
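/*
 * Merge previously buffered bytes with the new request data, align the
 * resulting scatterlist for the hardware, and stash any trailing
 * partial block back in rctx->buffer for a later update/final.
 */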
 841static int omap_sham_prepare_request(struct crypto_engine *engine, void *areq)
 842{
 843	struct ahash_request *req = container_of(areq, struct ahash_request,
 844						 base);
 845	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
 846	int bs;
 847	int ret;
 848	unsigned int nbytes;
 849	bool final = rctx->flags & BIT(FLAGS_FINUP);
 850	bool update = rctx->op == OP_UPDATE;
 851	int hash_later;
 852
 853	bs = get_block_size(rctx);
 854
 855	nbytes = rctx->bufcnt;
 856
 857	if (update)
 858		nbytes += req->nbytes - rctx->offset;
 859
 860	dev_dbg(rctx->dd->dev,
 861		"%s: nbytes=%d, bs=%d, total=%d, offset=%d, bufcnt=%zd\n",
 862		__func__, nbytes, bs, rctx->total, rctx->offset,
 863		rctx->bufcnt);
 864
 865	if (!nbytes)
 866		return 0;
 867
 868	rctx->total = nbytes;
 869
 870	if (update && req->nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
 871		int len = bs - rctx->bufcnt % bs;
 872
 873		if (len > req->nbytes)
 874			len = req->nbytes;
 875		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
 876					 0, len, 0);
 877		rctx->bufcnt += len;
 878		rctx->offset = len;
 879	}
 880
 881	if (rctx->bufcnt)
 882		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);
 883
 884	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
 885	if (ret)
 886		return ret;
 887
 888	hash_later = nbytes - rctx->total;
 889	if (hash_later < 0)
 890		hash_later = 0;
 891
 892	if (hash_later && hash_later <= rctx->buflen) {
 893		scatterwalk_map_and_copy(rctx->buffer,
 894					 req->src,
 895					 req->nbytes - hash_later,
 896					 hash_later, 0);
 897
 898		rctx->bufcnt = hash_later;
 899	} else {
 900		rctx->bufcnt = 0;
 901	}
 902
 903	if (hash_later > rctx->buflen)
 904		set_bit(FLAGS_HUGE, &rctx->dd->flags);
 905
 906	rctx->total = min(nbytes, rctx->total);
 907
 908	return 0;
 909}
 910
 911static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 912{
 913	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 914
 915	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
 916
 917	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 918
 919	return 0;
 920}
 921
 922static struct omap_sham_dev *omap_sham_find_dev(struct omap_sham_reqctx *ctx)
 923{
 924	struct omap_sham_dev *dd;
 925
 926	if (ctx->dd)
 927		return ctx->dd;
 928
 929	spin_lock_bh(&sham.lock);
 930	dd = list_first_entry(&sham.dev_list, struct omap_sham_dev, list);
 931	list_move_tail(&dd->list, &sham.dev_list);
 932	ctx->dd = dd;
 933	spin_unlock_bh(&sham.lock);
 934
 935	return dd;
 936}
 937
 938static int omap_sham_init(struct ahash_request *req)
 939{
 940	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 941	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 942	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 943	struct omap_sham_dev *dd;
 944	int bs = 0;
 945
 946	ctx->dd = NULL;
 947
 948	dd = omap_sham_find_dev(ctx);
 949	if (!dd)
 950		return -ENODEV;
 951
 952	ctx->flags = 0;
 953
 954	dev_dbg(dd->dev, "init: digest size: %d\n",
 955		crypto_ahash_digestsize(tfm));
 956
 957	switch (crypto_ahash_digestsize(tfm)) {
 958	case MD5_DIGEST_SIZE:
 959		ctx->flags |= FLAGS_MODE_MD5;
 960		bs = SHA1_BLOCK_SIZE;
 961		break;
 962	case SHA1_DIGEST_SIZE:
 963		ctx->flags |= FLAGS_MODE_SHA1;
 964		bs = SHA1_BLOCK_SIZE;
 965		break;
 966	case SHA224_DIGEST_SIZE:
 967		ctx->flags |= FLAGS_MODE_SHA224;
 968		bs = SHA224_BLOCK_SIZE;
 969		break;
 970	case SHA256_DIGEST_SIZE:
 971		ctx->flags |= FLAGS_MODE_SHA256;
 972		bs = SHA256_BLOCK_SIZE;
 973		break;
 974	case SHA384_DIGEST_SIZE:
 975		ctx->flags |= FLAGS_MODE_SHA384;
 976		bs = SHA384_BLOCK_SIZE;
 977		break;
 978	case SHA512_DIGEST_SIZE:
 979		ctx->flags |= FLAGS_MODE_SHA512;
 980		bs = SHA512_BLOCK_SIZE;
 981		break;
 982	}
 983
 984	ctx->bufcnt = 0;
 985	ctx->digcnt = 0;
 986	ctx->total = 0;
 987	ctx->offset = 0;
 988	ctx->buflen = BUFLEN;
 989
 990	if (tctx->flags & BIT(FLAGS_HMAC)) {
 991		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
 992			struct omap_sham_hmac_ctx *bctx = tctx->base;
 993
 994			memcpy(ctx->buffer, bctx->ipad, bs);
 995			ctx->bufcnt = bs;
 996		}
 997
 998		ctx->flags |= BIT(FLAGS_HMAC);
 999	}
1000
1001	return 0;
1002
1003}
1004
1005static int omap_sham_update_req(struct omap_sham_dev *dd)
1006{
1007	struct ahash_request *req = dd->req;
1008	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1009	int err;
1010	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1011		!(dd->flags & BIT(FLAGS_HUGE));
1012
1013	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %zd, final: %d",
1014		ctx->total, ctx->digcnt, final);
1015
1016	if (ctx->total < get_block_size(ctx) ||
1017	    ctx->total < dd->fallback_sz)
1018		ctx->flags |= BIT(FLAGS_CPU);
1019
1020	if (ctx->flags & BIT(FLAGS_CPU))
1021		err = omap_sham_xmit_cpu(dd, ctx->total, final);
1022	else
1023		err = omap_sham_xmit_dma(dd, ctx->total, final);
1024
1025	/* wait for DMA completion before we can take more data */
1026	dev_dbg(dd->dev, "update: err: %d, digcnt: %zd\n", err, ctx->digcnt);
1027
1028	return err;
1029}
1030
1031static int omap_sham_final_req(struct omap_sham_dev *dd)
1032{
1033	struct ahash_request *req = dd->req;
1034	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1035	int err = 0, use_dma = 1;
1036
1037	if (dd->flags & BIT(FLAGS_HUGE))
1038		return 0;
1039
1040	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
1041		/*
1042		 * It is faster to handle the last block with the CPU; the CPU
1043		 * is also used when DMA is not available.
1044		 */
1045		use_dma = 0;
1046
1047	if (use_dma)
1048		err = omap_sham_xmit_dma(dd, ctx->total, 1);
1049	else
1050		err = omap_sham_xmit_cpu(dd, ctx->total, 1);
1051
1052	ctx->bufcnt = 0;
1053
1054	dev_dbg(dd->dev, "final_req: err: %d\n", err);
1055
1056	return err;
1057}
1058
1059static int omap_sham_hash_one_req(struct crypto_engine *engine, void *areq)
1060{
1061	struct ahash_request *req = container_of(areq, struct ahash_request,
1062						 base);
1063	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1064	struct omap_sham_dev *dd = ctx->dd;
1065	int err;
1066	bool final = (ctx->flags & BIT(FLAGS_FINUP)) &&
1067			!(dd->flags & BIT(FLAGS_HUGE));
1068
1069	dev_dbg(dd->dev, "hash-one: op: %u, total: %u, digcnt: %zd, final: %d",
1070		ctx->op, ctx->total, ctx->digcnt, final);
1071
1072	err = omap_sham_prepare_request(engine, areq);
1073	if (err)
1074		return err;
1075
1076	err = pm_runtime_resume_and_get(dd->dev);
1077	if (err < 0) {
1078		dev_err(dd->dev, "failed to get sync: %d\n", err);
1079		return err;
1080	}
1081
1082	dd->err = 0;
1083	dd->req = req;
1084
1085	if (ctx->digcnt)
1086		dd->pdata->copy_hash(req, 0);
1087
1088	if (ctx->op == OP_UPDATE)
1089		err = omap_sham_update_req(dd);
1090	else if (ctx->op == OP_FINAL)
1091		err = omap_sham_final_req(dd);
1092
1093	if (err != -EINPROGRESS)
1094		omap_sham_finish_req(req, err);
1095
1096	return 0;
1097}
1098
1099static int omap_sham_finish_hmac(struct ahash_request *req)
1100{
1101	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1102	struct omap_sham_hmac_ctx *bctx = tctx->base;
1103	int bs = crypto_shash_blocksize(bctx->shash);
1104	int ds = crypto_shash_digestsize(bctx->shash);
1105	SHASH_DESC_ON_STACK(shash, bctx->shash);
1106
1107	shash->tfm = bctx->shash;
1108
1109	return crypto_shash_init(shash) ?:
1110	       crypto_shash_update(shash, bctx->opad, bs) ?:
1111	       crypto_shash_finup(shash, req->result, ds, req->result);
1112}
1113
1114static int omap_sham_finish(struct ahash_request *req)
1115{
1116	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1117	struct omap_sham_dev *dd = ctx->dd;
1118	int err = 0;
1119
1120	if (ctx->digcnt) {
1121		omap_sham_copy_ready_hash(req);
1122		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
1123				!test_bit(FLAGS_AUTO_XOR, &dd->flags))
1124			err = omap_sham_finish_hmac(req);
1125	}
1126
1127	dev_dbg(dd->dev, "digcnt: %zd, bufcnt: %zd\n", ctx->digcnt, ctx->bufcnt);
1128
1129	return err;
1130}
1131
1132static void omap_sham_finish_req(struct ahash_request *req, int err)
1133{
1134	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1135	struct omap_sham_dev *dd = ctx->dd;
1136
1137	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
1138		free_pages((unsigned long)sg_virt(ctx->sg),
1139			   get_order(ctx->sg->length));
1140
1141	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
1142		kfree(ctx->sg);
1143
1144	ctx->sg = NULL;
1145
1146	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED) |
1147		       BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) |
1148		       BIT(FLAGS_OUTPUT_READY));
1149
1150	if (!err)
1151		dd->pdata->copy_hash(req, 1);
1152
1153	if (dd->flags & BIT(FLAGS_HUGE)) {
1154		/* Re-enqueue the request */
1155		omap_sham_enqueue(req, ctx->op);
1156		return;
1157	}
1158
1159	if (!err) {
1160		if (test_bit(FLAGS_FINAL, &dd->flags))
1161			err = omap_sham_finish(req);
1162	} else {
1163		ctx->flags |= BIT(FLAGS_ERROR);
1164	}
1165
1166	/* atomic operation is not needed here */
1167	dd->flags &= ~(BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
1168			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
1169
1170	pm_runtime_mark_last_busy(dd->dev);
1171	pm_runtime_put_autosuspend(dd->dev);
1172
1173	ctx->offset = 0;
1174
1175	crypto_finalize_hash_request(dd->engine, req, err);
1176}
1177
1178static int omap_sham_handle_queue(struct omap_sham_dev *dd,
1179				  struct ahash_request *req)
1180{
1181	return crypto_transfer_hash_request_to_engine(dd->engine, req);
1182}
1183
1184static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
1185{
1186	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1187	struct omap_sham_dev *dd = ctx->dd;
1188
1189	ctx->op = op;
1190
1191	return omap_sham_handle_queue(dd, req);
1192}
1193
1194static int omap_sham_update(struct ahash_request *req)
1195{
1196	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1197	struct omap_sham_dev *dd = omap_sham_find_dev(ctx);
1198
1199	if (!req->nbytes)
1200		return 0;
1201
1202	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
1203		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
1204					 0, req->nbytes, 0);
1205		ctx->bufcnt += req->nbytes;
1206		return 0;
1207	}
1208
1209	if (dd->polling_mode)
1210		ctx->flags |= BIT(FLAGS_CPU);
1211
1212	return omap_sham_enqueue(req, OP_UPDATE);
1213}
1214
1215static int omap_sham_final_shash(struct ahash_request *req)
1216{
1217	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
1218	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1219	int offset = 0;
1220
1221	/*
1222	 * If we are running HMAC on hardware with limited support (no
1223	 * auto-XOR), the buffer starts with the ipad; skip it when the
1224	 * data is handed to the software fallback algorithm.
1225	 */
1226	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
1227	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
1228		offset = get_block_size(ctx);
1229
1230	return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer + offset,
1231				       ctx->bufcnt - offset, req->result);
1232}
1233
1234static int omap_sham_final(struct ahash_request *req)
1235{
1236	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1237
1238	ctx->flags |= BIT(FLAGS_FINUP);
1239
1240	if (ctx->flags & BIT(FLAGS_ERROR))
1241		return 0; /* uncompleted hash is not needed */
1242
1243	/*
1244	 * The OMAP HW accelerator works only with buffers >= 9 bytes.
1245	 * HMAC is always >= 9 because ipad == block size.
1246	 * If the buffer size is less than fallback_sz, we use the SW
1247	 * fallback implementation, as DMA + HW provides no benefit in
1248	 * that case.
1249	 */
1250	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
1251		return omap_sham_final_shash(req);
1252	else if (ctx->bufcnt)
1253		return omap_sham_enqueue(req, OP_FINAL);
1254
1255	/* copy ready hash (+ finalize hmac) */
1256	return omap_sham_finish(req);
1257}
1258
1259static int omap_sham_finup(struct ahash_request *req)
1260{
1261	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
1262	int err1, err2;
1263
1264	ctx->flags |= BIT(FLAGS_FINUP);
1265
1266	err1 = omap_sham_update(req);
1267	if (err1 == -EINPROGRESS || err1 == -EBUSY)
1268		return err1;
1269	/*
1270	 * final() always has to be called to clean up resources,
1271	 * even if update() failed, except for -EINPROGRESS
1272	 */
1273	err2 = omap_sham_final(req);
1274
1275	return err1 ?: err2;
1276}
1277
1278static int omap_sham_digest(struct ahash_request *req)
1279{
1280	return omap_sham_init(req) ?: omap_sham_finup(req);
1281}
1282
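/*
 * HMAC keys longer than the block size are first digested down to ds
 * bytes.  The ipad/opad XOR is only done in software when the hardware
 * cannot do it itself (no FLAGS_AUTO_XOR).
 */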
1283static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
1284		      unsigned int keylen)
1285{
1286	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
1287	struct omap_sham_hmac_ctx *bctx = tctx->base;
1288	int bs = crypto_shash_blocksize(bctx->shash);
1289	int ds = crypto_shash_digestsize(bctx->shash);
1290	int err, i;
1291
1292	err = crypto_shash_setkey(tctx->fallback, key, keylen);
1293	if (err)
1294		return err;
1295
1296	if (keylen > bs) {
1297		err = crypto_shash_tfm_digest(bctx->shash, key, keylen,
1298					      bctx->ipad);
1299		if (err)
1300			return err;
1301		keylen = ds;
1302	} else {
1303		memcpy(bctx->ipad, key, keylen);
1304	}
1305
1306	memset(bctx->ipad + keylen, 0, bs - keylen);
1307
1308	if (!test_bit(FLAGS_AUTO_XOR, &sham.flags)) {
1309		memcpy(bctx->opad, bctx->ipad, bs);
1310
1311		for (i = 0; i < bs; i++) {
1312			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
1313			bctx->opad[i] ^= HMAC_OPAD_VALUE;
1314		}
1315	}
1316
1317	return err;
1318}
1319
1320static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1321{
1322	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1323	const char *alg_name = crypto_tfm_alg_name(tfm);
1324
1325	/* Allocate a fallback and abort if it failed. */
1326	tctx->fallback = crypto_alloc_shash(alg_name, 0,
1327					    CRYPTO_ALG_NEED_FALLBACK);
1328	if (IS_ERR(tctx->fallback)) {
1329		pr_err("omap-sham: fallback driver '%s' "
1330				"could not be loaded.\n", alg_name);
1331		return PTR_ERR(tctx->fallback);
1332	}
1333
1334	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1335				 sizeof(struct omap_sham_reqctx) + BUFLEN);
1336
1337	if (alg_base) {
1338		struct omap_sham_hmac_ctx *bctx = tctx->base;
1339		tctx->flags |= BIT(FLAGS_HMAC);
1340		bctx->shash = crypto_alloc_shash(alg_base, 0,
1341						CRYPTO_ALG_NEED_FALLBACK);
1342		if (IS_ERR(bctx->shash)) {
1343			pr_err("omap-sham: base driver '%s' "
1344					"could not be loaded.\n", alg_base);
1345			crypto_free_shash(tctx->fallback);
1346			return PTR_ERR(bctx->shash);
1347		}
1348
1349	}
1350
1351	return 0;
1352}
1353
1354static int omap_sham_cra_init(struct crypto_tfm *tfm)
1355{
1356	return omap_sham_cra_init_alg(tfm, NULL);
1357}
1358
1359static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
1360{
1361	return omap_sham_cra_init_alg(tfm, "sha1");
1362}
1363
1364static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
1365{
1366	return omap_sham_cra_init_alg(tfm, "sha224");
1367}
1368
1369static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
1370{
1371	return omap_sham_cra_init_alg(tfm, "sha256");
1372}
1373
1374static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
1375{
1376	return omap_sham_cra_init_alg(tfm, "md5");
1377}
1378
1379static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
1380{
1381	return omap_sham_cra_init_alg(tfm, "sha384");
1382}
1383
1384static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
1385{
1386	return omap_sham_cra_init_alg(tfm, "sha512");
1387}
1388
1389static void omap_sham_cra_exit(struct crypto_tfm *tfm)
1390{
1391	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
1392
1393	crypto_free_shash(tctx->fallback);
1394	tctx->fallback = NULL;
1395
1396	if (tctx->flags & BIT(FLAGS_HMAC)) {
1397		struct omap_sham_hmac_ctx *bctx = tctx->base;
1398		crypto_free_shash(bctx->shash);
1399	}
1400}
1401
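/*
 * export/import serialize the whole request context, including any
 * partially buffered block, as one flat blob.
 */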
1402static int omap_sham_export(struct ahash_request *req, void *out)
1403{
1404	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1405
1406	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);
1407
1408	return 0;
1409}
1410
1411static int omap_sham_import(struct ahash_request *req, const void *in)
1412{
1413	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
1414	const struct omap_sham_reqctx *ctx_in = in;
1415
1416	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);
1417
1418	return 0;
1419}
1420
1421static struct ahash_engine_alg algs_sha1_md5[] = {
1422{
1423	.base.init		= omap_sham_init,
1424	.base.update		= omap_sham_update,
1425	.base.final		= omap_sham_final,
1426	.base.finup		= omap_sham_finup,
1427	.base.digest		= omap_sham_digest,
1428	.base.halg.digestsize	= SHA1_DIGEST_SIZE,
1429	.base.halg.base	= {
1430		.cra_name		= "sha1",
1431		.cra_driver_name	= "omap-sha1",
1432		.cra_priority		= 400,
1433		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1434						CRYPTO_ALG_ASYNC |
1435						CRYPTO_ALG_NEED_FALLBACK,
1436		.cra_blocksize		= SHA1_BLOCK_SIZE,
1437		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1438		.cra_module		= THIS_MODULE,
1439		.cra_init		= omap_sham_cra_init,
1440		.cra_exit		= omap_sham_cra_exit,
1441	},
1442	.op.do_one_request = omap_sham_hash_one_req,
1443},
1444{
1445	.base.init		= omap_sham_init,
1446	.base.update		= omap_sham_update,
1447	.base.final		= omap_sham_final,
1448	.base.finup		= omap_sham_finup,
1449	.base.digest		= omap_sham_digest,
1450	.base.halg.digestsize	= MD5_DIGEST_SIZE,
1451	.base.halg.base	= {
1452		.cra_name		= "md5",
1453		.cra_driver_name	= "omap-md5",
1454		.cra_priority		= 400,
1455		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1456						CRYPTO_ALG_ASYNC |
1457						CRYPTO_ALG_NEED_FALLBACK,
1458		.cra_blocksize		= SHA1_BLOCK_SIZE,
1459		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1460		.cra_module		= THIS_MODULE,
1461		.cra_init		= omap_sham_cra_init,
1462		.cra_exit		= omap_sham_cra_exit,
1463	},
1464	.op.do_one_request = omap_sham_hash_one_req,
1465},
1466{
1467	.base.init		= omap_sham_init,
1468	.base.update		= omap_sham_update,
1469	.base.final		= omap_sham_final,
1470	.base.finup		= omap_sham_finup,
1471	.base.digest		= omap_sham_digest,
1472	.base.setkey		= omap_sham_setkey,
1473	.base.halg.digestsize	= SHA1_DIGEST_SIZE,
1474	.base.halg.base	= {
1475		.cra_name		= "hmac(sha1)",
1476		.cra_driver_name	= "omap-hmac-sha1",
1477		.cra_priority		= 400,
1478		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1479						CRYPTO_ALG_ASYNC |
1480						CRYPTO_ALG_NEED_FALLBACK,
1481		.cra_blocksize		= SHA1_BLOCK_SIZE,
1482		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1483					sizeof(struct omap_sham_hmac_ctx),
1484		.cra_module		= THIS_MODULE,
1485		.cra_init		= omap_sham_cra_sha1_init,
1486		.cra_exit		= omap_sham_cra_exit,
1487	},
1488	.op.do_one_request = omap_sham_hash_one_req,
1489},
1490{
1491	.base.init		= omap_sham_init,
1492	.base.update		= omap_sham_update,
1493	.base.final		= omap_sham_final,
1494	.base.finup		= omap_sham_finup,
1495	.base.digest		= omap_sham_digest,
1496	.base.setkey		= omap_sham_setkey,
1497	.base.halg.digestsize	= MD5_DIGEST_SIZE,
1498	.base.halg.base	= {
1499		.cra_name		= "hmac(md5)",
1500		.cra_driver_name	= "omap-hmac-md5",
1501		.cra_priority		= 400,
1502		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1503						CRYPTO_ALG_ASYNC |
1504						CRYPTO_ALG_NEED_FALLBACK,
1505		.cra_blocksize		= SHA1_BLOCK_SIZE,
1506		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1507					sizeof(struct omap_sham_hmac_ctx),
1508		.cra_module		= THIS_MODULE,
1509		.cra_init		= omap_sham_cra_md5_init,
1510		.cra_exit		= omap_sham_cra_exit,
1511	},
1512	.op.do_one_request = omap_sham_hash_one_req,
1513}
1514};
1515
1516/* OMAP4 has some algs in addition to what OMAP2 has */
1517static struct ahash_engine_alg algs_sha224_sha256[] = {
1518{
1519	.base.init		= omap_sham_init,
1520	.base.update		= omap_sham_update,
1521	.base.final		= omap_sham_final,
1522	.base.finup		= omap_sham_finup,
1523	.base.digest		= omap_sham_digest,
1524	.base.halg.digestsize	= SHA224_DIGEST_SIZE,
1525	.base.halg.base	= {
1526		.cra_name		= "sha224",
1527		.cra_driver_name	= "omap-sha224",
1528		.cra_priority		= 400,
1529		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1530						CRYPTO_ALG_ASYNC |
1531						CRYPTO_ALG_NEED_FALLBACK,
1532		.cra_blocksize		= SHA224_BLOCK_SIZE,
1533		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1534		.cra_module		= THIS_MODULE,
1535		.cra_init		= omap_sham_cra_init,
1536		.cra_exit		= omap_sham_cra_exit,
1537	},
1538	.op.do_one_request = omap_sham_hash_one_req,
1539},
1540{
1541	.base.init		= omap_sham_init,
1542	.base.update		= omap_sham_update,
1543	.base.final		= omap_sham_final,
1544	.base.finup		= omap_sham_finup,
1545	.base.digest		= omap_sham_digest,
1546	.base.halg.digestsize	= SHA256_DIGEST_SIZE,
1547	.base.halg.base	= {
1548		.cra_name		= "sha256",
1549		.cra_driver_name	= "omap-sha256",
1550		.cra_priority		= 400,
1551		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1552						CRYPTO_ALG_ASYNC |
1553						CRYPTO_ALG_NEED_FALLBACK,
1554		.cra_blocksize		= SHA256_BLOCK_SIZE,
1555		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1556		.cra_module		= THIS_MODULE,
1557		.cra_init		= omap_sham_cra_init,
1558		.cra_exit		= omap_sham_cra_exit,
1559	},
1560	.op.do_one_request = omap_sham_hash_one_req,
1561},
1562{
1563	.base.init		= omap_sham_init,
1564	.base.update		= omap_sham_update,
1565	.base.final		= omap_sham_final,
1566	.base.finup		= omap_sham_finup,
1567	.base.digest		= omap_sham_digest,
1568	.base.setkey		= omap_sham_setkey,
1569	.base.halg.digestsize	= SHA224_DIGEST_SIZE,
1570	.base.halg.base	= {
1571		.cra_name		= "hmac(sha224)",
1572		.cra_driver_name	= "omap-hmac-sha224",
1573		.cra_priority		= 400,
1574		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1575						CRYPTO_ALG_ASYNC |
1576						CRYPTO_ALG_NEED_FALLBACK,
1577		.cra_blocksize		= SHA224_BLOCK_SIZE,
1578		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1579					sizeof(struct omap_sham_hmac_ctx),
1580		.cra_module		= THIS_MODULE,
1581		.cra_init		= omap_sham_cra_sha224_init,
1582		.cra_exit		= omap_sham_cra_exit,
1583	},
1584	.op.do_one_request = omap_sham_hash_one_req,
1585},
1586{
1587	.base.init		= omap_sham_init,
1588	.base.update		= omap_sham_update,
1589	.base.final		= omap_sham_final,
1590	.base.finup		= omap_sham_finup,
1591	.base.digest		= omap_sham_digest,
1592	.base.setkey		= omap_sham_setkey,
1593	.base.halg.digestsize	= SHA256_DIGEST_SIZE,
1594	.base.halg.base	= {
1595		.cra_name		= "hmac(sha256)",
1596		.cra_driver_name	= "omap-hmac-sha256",
1597		.cra_priority		= 400,
1598		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1599						CRYPTO_ALG_ASYNC |
1600						CRYPTO_ALG_NEED_FALLBACK,
1601		.cra_blocksize		= SHA256_BLOCK_SIZE,
1602		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1603					sizeof(struct omap_sham_hmac_ctx),
1604		.cra_module		= THIS_MODULE,
1605		.cra_init		= omap_sham_cra_sha256_init,
1606		.cra_exit		= omap_sham_cra_exit,
1607	},
1608	.op.do_one_request = omap_sham_hash_one_req,
1609},
1610};
1611
1612static struct ahash_engine_alg algs_sha384_sha512[] = {
1613{
1614	.base.init		= omap_sham_init,
1615	.base.update		= omap_sham_update,
1616	.base.final		= omap_sham_final,
1617	.base.finup		= omap_sham_finup,
1618	.base.digest		= omap_sham_digest,
1619	.base.halg.digestsize	= SHA384_DIGEST_SIZE,
1620	.base.halg.base	= {
1621		.cra_name		= "sha384",
1622		.cra_driver_name	= "omap-sha384",
1623		.cra_priority		= 400,
1624		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1625						CRYPTO_ALG_ASYNC |
1626						CRYPTO_ALG_NEED_FALLBACK,
1627		.cra_blocksize		= SHA384_BLOCK_SIZE,
1628		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1629		.cra_module		= THIS_MODULE,
1630		.cra_init		= omap_sham_cra_init,
1631		.cra_exit		= omap_sham_cra_exit,
1632	},
1633	.op.do_one_request = omap_sham_hash_one_req,
1634},
1635{
1636	.base.init		= omap_sham_init,
1637	.base.update		= omap_sham_update,
1638	.base.final		= omap_sham_final,
1639	.base.finup		= omap_sham_finup,
1640	.base.digest		= omap_sham_digest,
1641	.base.halg.digestsize	= SHA512_DIGEST_SIZE,
1642	.base.halg.base	= {
1643		.cra_name		= "sha512",
1644		.cra_driver_name	= "omap-sha512",
1645		.cra_priority		= 400,
1646		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1647						CRYPTO_ALG_ASYNC |
1648						CRYPTO_ALG_NEED_FALLBACK,
1649		.cra_blocksize		= SHA512_BLOCK_SIZE,
1650		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
1651		.cra_module		= THIS_MODULE,
1652		.cra_init		= omap_sham_cra_init,
1653		.cra_exit		= omap_sham_cra_exit,
1654	},
1655	.op.do_one_request = omap_sham_hash_one_req,
1656},
1657{
1658	.base.init		= omap_sham_init,
1659	.base.update		= omap_sham_update,
1660	.base.final		= omap_sham_final,
1661	.base.finup		= omap_sham_finup,
1662	.base.digest		= omap_sham_digest,
1663	.base.setkey		= omap_sham_setkey,
1664	.base.halg.digestsize	= SHA384_DIGEST_SIZE,
1665	.base.halg.base	= {
1666		.cra_name		= "hmac(sha384)",
1667		.cra_driver_name	= "omap-hmac-sha384",
1668		.cra_priority		= 400,
1669		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1670						CRYPTO_ALG_ASYNC |
1671						CRYPTO_ALG_NEED_FALLBACK,
1672		.cra_blocksize		= SHA384_BLOCK_SIZE,
1673		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1674					sizeof(struct omap_sham_hmac_ctx),
1675		.cra_module		= THIS_MODULE,
1676		.cra_init		= omap_sham_cra_sha384_init,
1677		.cra_exit		= omap_sham_cra_exit,
1678	},
1679	.op.do_one_request = omap_sham_hash_one_req,
1680},
1681{
1682	.base.init		= omap_sham_init,
1683	.base.update		= omap_sham_update,
1684	.base.final		= omap_sham_final,
1685	.base.finup		= omap_sham_finup,
1686	.base.digest		= omap_sham_digest,
1687	.base.setkey		= omap_sham_setkey,
1688	.base.halg.digestsize	= SHA512_DIGEST_SIZE,
1689	.base.halg.base	= {
1690		.cra_name		= "hmac(sha512)",
1691		.cra_driver_name	= "omap-hmac-sha512",
1692		.cra_priority		= 400,
1693		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
1694						CRYPTO_ALG_ASYNC |
1695						CRYPTO_ALG_NEED_FALLBACK,
1696		.cra_blocksize		= SHA512_BLOCK_SIZE,
1697		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1698					sizeof(struct omap_sham_hmac_ctx),
1699		.cra_module		= THIS_MODULE,
1700		.cra_init		= omap_sham_cra_sha512_init,
1701		.cra_exit		= omap_sham_cra_exit,
1702	},
1703	.op.do_one_request = omap_sham_hash_one_req,
1704},
1705};
1706
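/*
 * Bottom half shared by the CPU and DMA paths: once OUTPUT_READY has
 * been seen (and any DMA transfer has been unmapped), finish the
 * current request.
 */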
1707static void omap_sham_done_task(unsigned long data)
1708{
1709	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
1710	int err = 0;
1711
1712	dev_dbg(dd->dev, "%s: flags=%lx\n", __func__, dd->flags);
1713
1714	if (test_bit(FLAGS_CPU, &dd->flags)) {
1715		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
1716			goto finish;
1717	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1718		if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1719			omap_sham_update_dma_stop(dd);
1720			if (dd->err) {
1721				err = dd->err;
1722				goto finish;
1723			}
1724		}
1725		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1726			/* hash or semi-hash ready */
1727			clear_bit(FLAGS_DMA_READY, &dd->flags);
1728			goto finish;
1729		}
1730	}
1731
1732	return;
1733
1734finish:
1735	dev_dbg(dd->dev, "update done: err: %d\n", err);
1736	/* finish current request */
1737	omap_sham_finish_req(dd->req, err);
1738}
1739
1740static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
1741{
1742	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1743	tasklet_schedule(&dd->done_task);
1744
1745	return IRQ_HANDLED;
1746}
1747
1748static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
1749{
1750	struct omap_sham_dev *dd = dev_id;
1751
1752	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
1753		/* final -> allow device to go to power-saving mode */
1754		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1755
1756	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1757				 SHA_REG_CTRL_OUTPUT_READY);
1758	omap_sham_read(dd, SHA_REG_CTRL);
1759
1760	return omap_sham_irq_common(dd);
1761}
1762
1763static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
1764{
1765	struct omap_sham_dev *dd = dev_id;
1766
1767	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);
1768
1769	return omap_sham_irq_common(dd);
1770}
1771
1772static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
1773	{
1774		.algs_list	= algs_sha1_md5,
1775		.size		= ARRAY_SIZE(algs_sha1_md5),
1776	},
1777};
1778
1779static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
1780	.algs_info	= omap_sham_algs_info_omap2,
1781	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
1782	.flags		= BIT(FLAGS_BE32_SHA1),
1783	.digest_size	= SHA1_DIGEST_SIZE,
1784	.copy_hash	= omap_sham_copy_hash_omap2,
1785	.write_ctrl	= omap_sham_write_ctrl_omap2,
1786	.trigger	= omap_sham_trigger_omap2,
1787	.poll_irq	= omap_sham_poll_irq_omap2,
1788	.intr_hdlr	= omap_sham_irq_omap2,
1789	.idigest_ofs	= 0x00,
1790	.din_ofs	= 0x1c,
1791	.digcnt_ofs	= 0x14,
1792	.rev_ofs	= 0x5c,
1793	.mask_ofs	= 0x60,
1794	.sysstatus_ofs	= 0x64,
1795	.major_mask	= 0xf0,
1796	.major_shift	= 4,
1797	.minor_mask	= 0x0f,
1798	.minor_shift	= 0,
1799};
1800
1801#ifdef CONFIG_OF
1802static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
1803	{
1804		.algs_list	= algs_sha1_md5,
1805		.size		= ARRAY_SIZE(algs_sha1_md5),
1806	},
1807	{
1808		.algs_list	= algs_sha224_sha256,
1809		.size		= ARRAY_SIZE(algs_sha224_sha256),
1810	},
1811};
1812
1813static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
1814	.algs_info	= omap_sham_algs_info_omap4,
1815	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
1816	.flags		= BIT(FLAGS_AUTO_XOR),
1817	.digest_size	= SHA256_DIGEST_SIZE,
1818	.copy_hash	= omap_sham_copy_hash_omap4,
1819	.write_ctrl	= omap_sham_write_ctrl_omap4,
1820	.trigger	= omap_sham_trigger_omap4,
1821	.poll_irq	= omap_sham_poll_irq_omap4,
1822	.intr_hdlr	= omap_sham_irq_omap4,
1823	.idigest_ofs	= 0x020,
1824	.odigest_ofs	= 0x0,
1825	.din_ofs	= 0x080,
1826	.digcnt_ofs	= 0x040,
1827	.rev_ofs	= 0x100,
1828	.mask_ofs	= 0x110,
1829	.sysstatus_ofs	= 0x114,
1830	.mode_ofs	= 0x44,
1831	.length_ofs	= 0x48,
1832	.major_mask	= 0x0700,
1833	.major_shift	= 8,
1834	.minor_mask	= 0x003f,
1835	.minor_shift	= 0,
1836};
1837
1838static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
1839	{
1840		.algs_list	= algs_sha1_md5,
1841		.size		= ARRAY_SIZE(algs_sha1_md5),
1842	},
1843	{
1844		.algs_list	= algs_sha224_sha256,
1845		.size		= ARRAY_SIZE(algs_sha224_sha256),
1846	},
1847	{
1848		.algs_list	= algs_sha384_sha512,
1849		.size		= ARRAY_SIZE(algs_sha384_sha512),
1850	},
1851};
1852
1853static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
1854	.algs_info	= omap_sham_algs_info_omap5,
1855	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
1856	.flags		= BIT(FLAGS_AUTO_XOR),
1857	.digest_size	= SHA512_DIGEST_SIZE,
1858	.copy_hash	= omap_sham_copy_hash_omap4,
1859	.write_ctrl	= omap_sham_write_ctrl_omap4,
1860	.trigger	= omap_sham_trigger_omap4,
1861	.poll_irq	= omap_sham_poll_irq_omap4,
1862	.intr_hdlr	= omap_sham_irq_omap4,
1863	.idigest_ofs	= 0x240,
1864	.odigest_ofs	= 0x200,
1865	.din_ofs	= 0x080,
1866	.digcnt_ofs	= 0x280,
1867	.rev_ofs	= 0x100,
1868	.mask_ofs	= 0x110,
1869	.sysstatus_ofs	= 0x114,
1870	.mode_ofs	= 0x284,
1871	.length_ofs	= 0x288,
1872	.major_mask	= 0x0700,
1873	.major_shift	= 8,
1874	.minor_mask	= 0x003f,
1875	.minor_shift	= 0,
1876};
1877
1878static const struct of_device_id omap_sham_of_match[] = {
1879	{
1880		.compatible	= "ti,omap2-sham",
1881		.data		= &omap_sham_pdata_omap2,
1882	},
1883	{
1884		.compatible	= "ti,omap3-sham",
1885		.data		= &omap_sham_pdata_omap2,
1886	},
1887	{
1888		.compatible	= "ti,omap4-sham",
1889		.data		= &omap_sham_pdata_omap4,
1890	},
1891	{
1892		.compatible	= "ti,omap5-sham",
1893		.data		= &omap_sham_pdata_omap5,
1894	},
1895	{},
1896};
1897MODULE_DEVICE_TABLE(of, omap_sham_of_match);
1898
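/*
 * Illustrative device tree node matched by the table above (not taken
 * from a real board file; the register address, interrupt and DMA
 * specifiers below are placeholders).  Only the compatible string and
 * the "rx" DMA channel name come from this driver:
 *
 *	sham: sham@4b100000 {
 *		compatible = "ti,omap4-sham";
 *		reg = <0x4b100000 0x300>;
 *		interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
 *		dmas = <&sdma 49>;
 *		dma-names = "rx";
 *	};
 */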
1899static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1900		struct device *dev, struct resource *res)
1901{
1902	struct device_node *node = dev->of_node;
1903	int err = 0;
1904
1905	dd->pdata = of_device_get_match_data(dev);
1906	if (!dd->pdata) {
1907		dev_err(dev, "no compatible OF match\n");
1908		err = -EINVAL;
1909		goto err;
1910	}
1911
1912	err = of_address_to_resource(node, 0, res);
1913	if (err < 0) {
1914		dev_err(dev, "can't translate OF node address\n");
1915		err = -EINVAL;
1916		goto err;
1917	}
1918
1919	dd->irq = irq_of_parse_and_map(node, 0);
1920	if (!dd->irq) {
1921		dev_err(dev, "can't translate OF irq value\n");
1922		err = -EINVAL;
1923		goto err;
1924	}
1925
1926err:
1927	return err;
1928}
1929#else
1930static const struct of_device_id omap_sham_of_match[] = {
1931	{},
1932};
1933
1934static int omap_sham_get_res_of(struct omap_sham_dev *dd,
1935		struct device *dev, struct resource *res)
1936{
1937	return -EINVAL;
1938}
1939#endif
1940
1941static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
1942		struct platform_device *pdev, struct resource *res)
1943{
1944	struct device *dev = &pdev->dev;
1945	struct resource *r;
1946	int err = 0;
1947
1948	/* Get the base address */
1949	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1950	if (!r) {
1951		dev_err(dev, "no MEM resource info\n");
1952		err = -ENODEV;
1953		goto err;
1954	}
1955	memcpy(res, r, sizeof(*res));
1956
1957	/* Get the IRQ */
1958	dd->irq = platform_get_irq(pdev, 0);
1959	if (dd->irq < 0) {
1960		err = dd->irq;
1961		goto err;
1962	}
1963
1964	/* Only OMAP2/3 can be non-DT */
1965	dd->pdata = &omap_sham_pdata_omap2;
1966
1967err:
1968	return err;
1969}
1970
1971static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
1972			     char *buf)
1973{
1974	struct omap_sham_dev *dd = dev_get_drvdata(dev);
1975
1976	return sprintf(buf, "%d\n", dd->fallback_sz);
1977}
1978
1979static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
1980			      const char *buf, size_t size)
1981{
1982	struct omap_sham_dev *dd = dev_get_drvdata(dev);
1983	ssize_t status;
1984	long value;
1985
1986	status = kstrtol(buf, 0, &value);
1987	if (status)
1988		return status;
1989
1990	/* HW accelerator only works with buffers >= 9 */
1991	if (value < 9) {
1992		dev_err(dev, "minimum fallback size 9\n");
1993		return -EINVAL;
1994	}
1995
1996	dd->fallback_sz = value;
1997
1998	return size;
1999}
2000
2001static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
2002			      char *buf)
2003{
2004	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2005
2006	return sprintf(buf, "%d\n", dd->queue.max_qlen);
2007}
2008
2009static ssize_t queue_len_store(struct device *dev,
2010			       struct device_attribute *attr, const char *buf,
2011			       size_t size)
2012{
2013	struct omap_sham_dev *dd = dev_get_drvdata(dev);
2014	ssize_t status;
2015	long value;
2016
2017	status = kstrtol(buf, 0, &value);
2018	if (status)
2019		return status;
2020
2021	if (value < 1)
2022		return -EINVAL;
2023
2024	/*
2025	 * Changing the queue size on the fly is safe: if the new size is
2026	 * smaller than the current size, the queue simply will not accept
2027	 * new entries until it has shrunk enough.
2028	 */
2029	dd->queue.max_qlen = value;
2030
2031	return size;
2032}
2033
2034static DEVICE_ATTR_RW(queue_len);
2035static DEVICE_ATTR_RW(fallback);
2036
2037static struct attribute *omap_sham_attrs[] = {
2038	&dev_attr_queue_len.attr,
2039	&dev_attr_fallback.attr,
2040	NULL,
2041};
2042
2043static const struct attribute_group omap_sham_attr_group = {
2044	.attrs = omap_sham_attrs,
2045};
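/*
 * Userspace tuning sketch (illustrative; the exact sysfs path depends on
 * how the platform device is named on a given board):
 *
 *	# cat /sys/devices/platform/<sham-device>/fallback
 *	# echo 256 > /sys/devices/platform/<sham-device>/fallback
 *	# echo 10 > /sys/devices/platform/<sham-device>/queue_len
 *
 * "fallback" sets the size below which requests are handed to the software
 * fallback (minimum 9), "queue_len" resizes the request queue on the fly.
 */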
2046
2047static int omap_sham_probe(struct platform_device *pdev)
2048{
2049	struct omap_sham_dev *dd;
2050	struct device *dev = &pdev->dev;
2051	struct resource res;
2052	dma_cap_mask_t mask;
2053	int err, i, j;
2054	u32 rev;
2055
2056	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
2057	if (dd == NULL) {
2058		dev_err(dev, "unable to alloc data struct.\n");
2059		err = -ENOMEM;
2060		goto data_err;
2061	}
2062	dd->dev = dev;
2063	platform_set_drvdata(pdev, dd);
2064
2065	INIT_LIST_HEAD(&dd->list);
2066	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
2067	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
2068
2069	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
2070			       omap_sham_get_res_pdev(dd, pdev, &res);
2071	if (err)
2072		goto data_err;
2073
2074	dd->io_base = devm_ioremap_resource(dev, &res);
2075	if (IS_ERR(dd->io_base)) {
2076		err = PTR_ERR(dd->io_base);
2077		goto data_err;
2078	}
2079	dd->phys_base = res.start;
2080
2081	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
2082			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
2083	if (err) {
2084		dev_err(dev, "unable to request irq %d, err = %d\n",
2085			dd->irq, err);
2086		goto data_err;
2087	}
2088
2089	dma_cap_zero(mask);
2090	dma_cap_set(DMA_SLAVE, mask);
2091
2092	dd->dma_lch = dma_request_chan(dev, "rx");
2093	if (IS_ERR(dd->dma_lch)) {
2094		err = PTR_ERR(dd->dma_lch);
2095		if (err == -EPROBE_DEFER)
2096			goto data_err;
2097
2098		dd->polling_mode = 1;
2099		dev_dbg(dev, "using polling mode instead of dma\n");
2100	}
2101
2102	dd->flags |= dd->pdata->flags;
2103	sham.flags |= dd->pdata->flags;
2104
2105	pm_runtime_use_autosuspend(dev);
2106	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);
2107
2108	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;
2109
2110	pm_runtime_enable(dev);
2111
2112	err = pm_runtime_resume_and_get(dev);
2113	if (err < 0) {
2114		dev_err(dev, "failed to get sync: %d\n", err);
2115		goto err_pm;
2116	}
2117
2118	rev = omap_sham_read(dd, SHA_REG_REV(dd));
2119	pm_runtime_put_sync(&pdev->dev);
2120
2121	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
2122		(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
2123		(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
2124
2125	spin_lock_bh(&sham.lock);
2126	list_add_tail(&dd->list, &sham.dev_list);
2127	spin_unlock_bh(&sham.lock);
2128
2129	dd->engine = crypto_engine_alloc_init(dev, 1);
2130	if (!dd->engine) {
2131		err = -ENOMEM;
2132		goto err_engine;
2133	}
2134
2135	err = crypto_engine_start(dd->engine);
2136	if (err)
2137		goto err_engine_start;
2138
2139	for (i = 0; i < dd->pdata->algs_info_size; i++) {
2140		if (dd->pdata->algs_info[i].registered)
2141			break;
2142
2143		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
2144			struct ahash_engine_alg *ealg;
2145			struct ahash_alg *alg;
2146
2147			ealg = &dd->pdata->algs_info[i].algs_list[j];
2148			alg = &ealg->base;
2149			alg->export = omap_sham_export;
2150			alg->import = omap_sham_import;
2151			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
2152					      BUFLEN;
2153			err = crypto_engine_register_ahash(ealg);
2154			if (err)
2155				goto err_algs;
2156
2157			dd->pdata->algs_info[i].registered++;
2158		}
2159	}
2160
2161	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
2162	if (err) {
2163		dev_err(dev, "could not create sysfs device attrs\n");
2164		goto err_algs;
2165	}
2166
2167	return 0;
2168
2169err_algs:
2170	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2171		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2172			crypto_engine_unregister_ahash(
2173					&dd->pdata->algs_info[i].algs_list[j]);
2174err_engine_start:
2175	crypto_engine_exit(dd->engine);
2176err_engine:
2177	spin_lock_bh(&sham.lock);
2178	list_del(&dd->list);
2179	spin_unlock_bh(&sham.lock);
2180err_pm:
2181	pm_runtime_dont_use_autosuspend(dev);
2182	pm_runtime_disable(dev);
2183	if (!dd->polling_mode)
2184		dma_release_channel(dd->dma_lch);
2185data_err:
2186	dev_err(dev, "initialization failed.\n");
2187
2188	return err;
2189}
2190
2191static void omap_sham_remove(struct platform_device *pdev)
2192{
2193	struct omap_sham_dev *dd;
2194	int i, j;
2195
2196	dd = platform_get_drvdata(pdev);
2197
2198	spin_lock_bh(&sham.lock);
2199	list_del(&dd->list);
2200	spin_unlock_bh(&sham.lock);
2201	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2202		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
2203			crypto_engine_unregister_ahash(
2204					&dd->pdata->algs_info[i].algs_list[j]);
2205			dd->pdata->algs_info[i].registered--;
2206		}
2207	tasklet_kill(&dd->done_task);
2208	pm_runtime_dont_use_autosuspend(&pdev->dev);
2209	pm_runtime_disable(&pdev->dev);
2210
2211	if (!dd->polling_mode)
2212		dma_release_channel(dd->dma_lch);
2213
2214	sysfs_remove_group(&dd->dev->kobj, &omap_sham_attr_group);
2215}
2216
2217static struct platform_driver omap_sham_driver = {
2218	.probe	= omap_sham_probe,
2219	.remove_new = omap_sham_remove,
2220	.driver	= {
2221		.name	= "omap-sham",
2222		.of_match_table	= omap_sham_of_match,
2223	},
2224};
2225
2226module_platform_driver(omap_sham_driver);
2227
2228MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
2229MODULE_LICENSE("GPL v2");
2230MODULE_AUTHOR("Dmitry Kasatkin");
2231MODULE_ALIAS("platform:omap-sham");
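/*
 * Usage sketch (illustrative only, not part of this driver): once the
 * ahash algorithms above are registered, any kernel user of the crypto API
 * can pick them up by algorithm name, e.g. "sha1" or "hmac(sha256)".
 * A minimal synchronous-style caller could look like this:
 */
#if 0
static int example_sha1_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* data must be linear kernel memory for sg_init_one() */
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
#endif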
v3.1
 
   1/*
   2 * Cryptographic API.
   3 *
   4 * Support for OMAP SHA1/MD5 HW acceleration.
   5 *
   6 * Copyright (c) 2010 Nokia Corporation
   7 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as published
  11 * by the Free Software Foundation.
  12 *
  13 * Some ideas are from old omap-sha1-md5.c driver.
  14 */
  15
  16#define pr_fmt(fmt) "%s: " fmt, __func__
  17
  18#include <linux/err.h>
  19#include <linux/device.h>
  20#include <linux/module.h>
 
  21#include <linux/init.h>
  22#include <linux/errno.h>
  23#include <linux/interrupt.h>
  24#include <linux/kernel.h>
  25#include <linux/clk.h>
  26#include <linux/irq.h>
  27#include <linux/io.h>
 
  28#include <linux/platform_device.h>
 
  29#include <linux/scatterlist.h>
  30#include <linux/dma-mapping.h>
  31#include <linux/delay.h>
  32#include <linux/crypto.h>
  33#include <linux/cryptohash.h>
  34#include <crypto/scatterwalk.h>
  35#include <crypto/algapi.h>
  36#include <crypto/sha.h>
  37#include <crypto/hash.h>
  38#include <crypto/internal/hash.h>
  39
  40#include <plat/cpu.h>
  41#include <plat/dma.h>
  42#include <mach/irqs.h>
  43
  44#define SHA_REG_DIGEST(x)		(0x00 + ((x) * 0x04))
  45#define SHA_REG_DIN(x)			(0x1C + ((x) * 0x04))
 
  46
  47#define SHA1_MD5_BLOCK_SIZE		SHA1_BLOCK_SIZE
  48#define MD5_DIGEST_SIZE			16
  49
  50#define SHA_REG_DIGCNT			0x14
  51
  52#define SHA_REG_CTRL			0x18
  53#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
  54#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
  55#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
  56#define SHA_REG_CTRL_ALGO		(1 << 2)
  57#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
  58#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)
  59
  60#define SHA_REG_REV			0x5C
  61#define SHA_REG_REV_MAJOR		0xF0
  62#define SHA_REG_REV_MINOR		0x0F
  63
  64#define SHA_REG_MASK			0x60
  65#define SHA_REG_MASK_DMA_EN		(1 << 3)
  66#define SHA_REG_MASK_IT_EN		(1 << 2)
  67#define SHA_REG_MASK_SOFTRESET		(1 << 1)
  68#define SHA_REG_AUTOIDLE		(1 << 0)
  69
  70#define SHA_REG_SYSSTATUS		0x64
  71#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)
  72
  73#define DEFAULT_TIMEOUT_INTERVAL	HZ
  74
  75/* mostly device flags */
  76#define FLAGS_BUSY		0
  77#define FLAGS_FINAL		1
  78#define FLAGS_DMA_ACTIVE	2
  79#define FLAGS_OUTPUT_READY	3
  80#define FLAGS_INIT		4
  81#define FLAGS_CPU		5
  82#define FLAGS_DMA_READY		6
  83/* context flags */
  84#define FLAGS_FINUP		16
  85#define FLAGS_SG		17
  86#define FLAGS_SHA1		18
  87#define FLAGS_HMAC		19
  88#define FLAGS_ERROR		20
  89
  90#define OP_UPDATE	1
  91#define OP_FINAL	2
  92
  93#define OMAP_ALIGN_MASK		(sizeof(u32)-1)
  94#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))
  95
  96#define BUFLEN		PAGE_SIZE
  97
  98struct omap_sham_dev;
  99
 100struct omap_sham_reqctx {
 101	struct omap_sham_dev	*dd;
 102	unsigned long		flags;
 103	unsigned long		op;
 104
 105	u8			digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED;
 106	size_t			digcnt;
 107	size_t			bufcnt;
 108	size_t			buflen;
 109	dma_addr_t		dma_addr;
 110
 111	/* walk state */
 112	struct scatterlist	*sg;
 113	unsigned int		offset;	/* offset in current sg */
 114	unsigned int		total;	/* total request */
 115
 116	u8			buffer[0] OMAP_ALIGNED;
 117};
 118
 119struct omap_sham_hmac_ctx {
 120	struct crypto_shash	*shash;
 121	u8			ipad[SHA1_MD5_BLOCK_SIZE];
 122	u8			opad[SHA1_MD5_BLOCK_SIZE];
 123};
 124
 125struct omap_sham_ctx {
 126	struct omap_sham_dev	*dd;
 127
 128	unsigned long		flags;
 129
 130	/* fallback stuff */
 131	struct crypto_shash	*fallback;
 132
 133	struct omap_sham_hmac_ctx base[0];
 134};
 135
 136#define OMAP_SHAM_QUEUE_LENGTH	1
 137
 138struct omap_sham_dev {
 139	struct list_head	list;
 140	unsigned long		phys_base;
 141	struct device		*dev;
 142	void __iomem		*io_base;
 143	int			irq;
 144	struct clk		*iclk;
 145	spinlock_t		lock;
 146	int			err;
 147	int			dma;
 148	int			dma_lch;
 149	struct tasklet_struct	done_task;
 150
 151	unsigned long		flags;
 152	struct crypto_queue	queue;
 153	struct ahash_request	*req;
 154};
 155
 156struct omap_sham_drv {
 157	struct list_head	dev_list;
 158	spinlock_t		lock;
 159	unsigned long		flags;
 160};
 161
 162static struct omap_sham_drv sham = {
 163	.dev_list = LIST_HEAD_INIT(sham.dev_list),
 164	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
 165};
 166
 167static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
 168{
 169	return __raw_readl(dd->io_base + offset);
 170}
 171
 172static inline void omap_sham_write(struct omap_sham_dev *dd,
 173					u32 offset, u32 value)
 174{
 175	__raw_writel(value, dd->io_base + offset);
 176}
 177
 178static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
 179					u32 value, u32 mask)
 180{
 181	u32 val;
 182
 183	val = omap_sham_read(dd, address);
 184	val &= ~mask;
 185	val |= value;
 186	omap_sham_write(dd, address, val);
 187}
 188
 189static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
 190{
 191	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;
 192
 193	while (!(omap_sham_read(dd, offset) & bit)) {
 194		if (time_is_before_jiffies(timeout))
 195			return -ETIMEDOUT;
 196	}
 197
 198	return 0;
 199}
 200
 201static void omap_sham_copy_hash(struct ahash_request *req, int out)
 202{
 203	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 204	u32 *hash = (u32 *)ctx->digest;
 205	int i;
 206
 207	/* MD5 is almost unused. So copy sha1 size to reduce code */
 208	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
 209		if (out)
 210			hash[i] = omap_sham_read(ctx->dd,
 211						SHA_REG_DIGEST(i));
 212		else
 213			omap_sham_write(ctx->dd,
 214					SHA_REG_DIGEST(i), hash[i]);
 215	}
 216}
 217
 218static void omap_sham_copy_ready_hash(struct ahash_request *req)
 219{
 220	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 221	u32 *in = (u32 *)ctx->digest;
 222	u32 *hash = (u32 *)req->result;
 223	int i;
 224
 225	if (!hash)
 226		return;
 227
 228	if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
 229		/* SHA1 results are in big endian */
 230		for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
 231			hash[i] = be32_to_cpu(in[i]);
 232	} else {
 233		/* MD5 results are in little endian */
 234		for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++)
 235			hash[i] = le32_to_cpu(in[i]);
 236	}
 237}
 238
 239static int omap_sham_hw_init(struct omap_sham_dev *dd)
 240{
 241	clk_enable(dd->iclk);
 242
 243	if (!test_bit(FLAGS_INIT, &dd->flags)) {
 244		omap_sham_write_mask(dd, SHA_REG_MASK,
 245			SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
 246
 247		if (omap_sham_wait(dd, SHA_REG_SYSSTATUS,
 248					SHA_REG_SYSSTATUS_RESETDONE))
 249			return -ETIMEDOUT;
 250
 251		set_bit(FLAGS_INIT, &dd->flags);
 252		dd->err = 0;
 253	}
 254
 255	return 0;
 256}
 257
 258static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
 259				 int final, int dma)
 260{
 261	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 262	u32 val = length << 5, mask;
 263
 264	if (likely(ctx->digcnt))
 265		omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt);
 266
 267	omap_sham_write_mask(dd, SHA_REG_MASK,
 268		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
 269		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
 270	/*
 271	 * Setting ALGO_CONST only for the first iteration
 272	 * and CLOSE_HASH only for the last one.
 273	 */
 274	if (ctx->flags & BIT(FLAGS_SHA1))
 275		val |= SHA_REG_CTRL_ALGO;
 276	if (!ctx->digcnt)
 277		val |= SHA_REG_CTRL_ALGO_CONST;
 278	if (final)
 279		val |= SHA_REG_CTRL_CLOSE_HASH;
 280
 281	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
 282			SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;
 283
 284	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
 285}
 286
 287static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
 288			      size_t length, int final)
 289{
 290	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 291	int count, len32;
 292	const u32 *buffer = (const u32 *)buf;
 293
 294	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
 295						ctx->digcnt, length, final);
 296
 297	omap_sham_write_ctrl(dd, length, final, 0);
 
 298
 299	/* should be non-zero before next lines to disable clocks later */
 300	ctx->digcnt += length;
 301
 302	if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY))
 303		return -ETIMEDOUT;
 304
 305	if (final)
 306		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 307
 308	set_bit(FLAGS_CPU, &dd->flags);
 309
 310	len32 = DIV_ROUND_UP(length, sizeof(u32));
 
 311
 312	for (count = 0; count < len32; count++)
 313		omap_sham_write(dd, SHA_REG_DIN(count), buffer[count]);
 314
 315	return -EINPROGRESS;
 316}
 317
 318static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
 319			      size_t length, int final)
 320{
 321	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 322	int len32;
 323
 324	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
 325						ctx->digcnt, length, final);
 326
 327	len32 = DIV_ROUND_UP(length, sizeof(u32));
 328
 329	omap_set_dma_transfer_params(dd->dma_lch, OMAP_DMA_DATA_TYPE_S32, len32,
 330			1, OMAP_DMA_SYNC_PACKET, dd->dma,
 331				OMAP_DMA_DST_SYNC_PREFETCH);
 332
 333	omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC,
 334				dma_addr, 0, 0);
 335
 336	omap_sham_write_ctrl(dd, length, final, 1);
 337
 338	ctx->digcnt += length;
 
 339
 340	if (final)
 341		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
 342
 343	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
 344
 345	omap_start_dma(dd->dma_lch);
 346
 347	return -EINPROGRESS;
 348}
 349
 350static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx,
 351				const u8 *data, size_t length)
 352{
 353	size_t count = min(length, ctx->buflen - ctx->bufcnt);
 354
 355	count = min(count, ctx->total);
 356	if (count <= 0)
 357		return 0;
 358	memcpy(ctx->buffer + ctx->bufcnt, data, count);
 359	ctx->bufcnt += count;
 360
 361	return count;
 362}
 363
 364static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx)
 365{
 366	size_t count;
 367
 368	while (ctx->sg) {
 369		count = omap_sham_append_buffer(ctx,
 370				sg_virt(ctx->sg) + ctx->offset,
 371				ctx->sg->length - ctx->offset);
 372		if (!count)
 373			break;
 374		ctx->offset += count;
 375		ctx->total -= count;
 376		if (ctx->offset == ctx->sg->length) {
 377			ctx->sg = sg_next(ctx->sg);
 378			if (ctx->sg)
 379				ctx->offset = 0;
 380			else
 381				ctx->total = 0;
 382		}
 383	}
 384
 385	return 0;
 386}
 387
 388static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
 389					struct omap_sham_reqctx *ctx,
 390					size_t length, int final)
 391{
 392	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen,
 393				       DMA_TO_DEVICE);
 394	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
 395		dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen);
 396		return -EINVAL;
 397	}
 398
 399	ctx->flags &= ~BIT(FLAGS_SG);
 400
 401	/* next call does not fail... so no unmap in the case of error */
 402	return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
 403}
 404
 405static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
 406{
 407	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 408	unsigned int final;
 409	size_t count;
 410
 411	omap_sham_append_sg(ctx);
 412
 413	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 414
 415	dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
 416					 ctx->bufcnt, ctx->digcnt, final);
 417
 418	if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) {
 419		count = ctx->bufcnt;
 420		ctx->bufcnt = 0;
 421		return omap_sham_xmit_dma_map(dd, ctx, count, final);
 422	}
 423
 424	return 0;
 425}
 426
 427/* Start address alignment */
 428#define SG_AA(sg)	(IS_ALIGNED(sg->offset, sizeof(u32)))
 429/* SHA1 block size alignment */
 430#define SG_SA(sg)	(IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE))
 431
 432static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
 433{
 434	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 435	unsigned int length, final, tail;
 436	struct scatterlist *sg;
 437
 438	if (!ctx->total)
 439		return 0;
 440
 441	if (ctx->bufcnt || ctx->offset)
 442		return omap_sham_update_dma_slow(dd);
 443
 444	dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n",
 445			ctx->digcnt, ctx->bufcnt, ctx->total);
 446
 447	sg = ctx->sg;
 
 448
 449	if (!SG_AA(sg))
 450		return omap_sham_update_dma_slow(dd);
 451
 452	if (!sg_is_last(sg) && !SG_SA(sg))
 453		/* size is not SHA1_BLOCK_SIZE aligned */
 454		return omap_sham_update_dma_slow(dd);
 455
 456	length = min(ctx->total, sg->length);
 457
 458	if (sg_is_last(sg)) {
 459		if (!(ctx->flags & BIT(FLAGS_FINUP))) {
 460			/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
 461			tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
 462			/* without finup() we need one block to close hash */
 463			if (!tail)
 464				tail = SHA1_MD5_BLOCK_SIZE;
 465			length -= tail;
 466		}
 467	}
 468
 469	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
 470		dev_err(dd->dev, "dma_map_sg  error\n");
 471		return -EINVAL;
 472	}
 473
 474	ctx->flags |= BIT(FLAGS_SG);
 
 475
 476	ctx->total -= length;
 477	ctx->offset = length; /* offset where to start slow */
 478
 479	final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
 480
 481	/* next call does not fail... so no unmap in the case of error */
 482	return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
 483}
 484
 485static int omap_sham_update_cpu(struct omap_sham_dev *dd)
 486{
 487	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 488	int bufcnt;
 489
 490	omap_sham_append_sg(ctx);
 491	bufcnt = ctx->bufcnt;
 492	ctx->bufcnt = 0;
 493
 494	return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
 495}
 496
 497static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
 498{
 499	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
 500
 501	omap_stop_dma(dd->dma_lch);
 502	if (ctx->flags & BIT(FLAGS_SG)) {
 503		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
 504		if (ctx->sg->length == ctx->offset) {
 505			ctx->sg = sg_next(ctx->sg);
 506			if (ctx->sg)
 507				ctx->offset = 0;
 508		}
 509	} else {
 510		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen,
 511				 DMA_TO_DEVICE);
 512	}
 513
 514	return 0;
 515}
 516
 517static int omap_sham_init(struct ahash_request *req)
 518{
 519	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 520	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 521	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 522	struct omap_sham_dev *dd = NULL, *tmp;
 
 523
 524	spin_lock_bh(&sham.lock);
 525	if (!tctx->dd) {
 526		list_for_each_entry(tmp, &sham.dev_list, list) {
 527			dd = tmp;
 528			break;
 529		}
 530		tctx->dd = dd;
 531	} else {
 532		dd = tctx->dd;
 533	}
 534	spin_unlock_bh(&sham.lock);
 535
 536	ctx->dd = dd;
 537
 538	ctx->flags = 0;
 539
 540	dev_dbg(dd->dev, "init: digest size: %d\n",
 541		crypto_ahash_digestsize(tfm));
 542
 543	if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
 544		ctx->flags |= BIT(FLAGS_SHA1);
 545
 546	ctx->bufcnt = 0;
 547	ctx->digcnt = 0;
 548	ctx->buflen = BUFLEN;
 549
 550	if (tctx->flags & BIT(FLAGS_HMAC)) {
 551		struct omap_sham_hmac_ctx *bctx = tctx->base;
 552
 553		memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
 554		ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
 555		ctx->flags |= BIT(FLAGS_HMAC);
 556	}
 557
 558	return 0;
 559
 560}
 561
 562static int omap_sham_update_req(struct omap_sham_dev *dd)
 563{
 564	struct ahash_request *req = dd->req;
 565	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 566	int err;
 567
 568	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
 569		 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
 
 570
 571	if (ctx->flags & BIT(FLAGS_CPU))
 572		err = omap_sham_update_cpu(dd);
 573	else
 574		err = omap_sham_update_dma_start(dd);
 575
 576	/* wait for dma completion before we can take more data */
 577	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);
 578
 579	return err;
 580}
 581
 582static int omap_sham_final_req(struct omap_sham_dev *dd)
 583{
 584	struct ahash_request *req = dd->req;
 585	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 586	int err = 0, use_dma = 1;
 587
 588	if (ctx->bufcnt <= 64)
 589		/* faster to handle last block with cpu */
 590		use_dma = 0;
 591
 592	if (use_dma)
 593		err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1);
 594	else
 595		err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1);
 596
 597	ctx->bufcnt = 0;
 598
 599	dev_dbg(dd->dev, "final_req: err: %d\n", err);
 600
 601	return err;
 602}
 603
 604static int omap_sham_finish_hmac(struct ahash_request *req)
 605{
 606	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 607	struct omap_sham_hmac_ctx *bctx = tctx->base;
 608	int bs = crypto_shash_blocksize(bctx->shash);
 609	int ds = crypto_shash_digestsize(bctx->shash);
 610	struct {
 611		struct shash_desc shash;
 612		char ctx[crypto_shash_descsize(bctx->shash)];
 613	} desc;
 614
 615	desc.shash.tfm = bctx->shash;
 616	desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */
 617
 618	return crypto_shash_init(&desc.shash) ?:
 619	       crypto_shash_update(&desc.shash, bctx->opad, bs) ?:
 620	       crypto_shash_finup(&desc.shash, req->result, ds, req->result);
 621}
 622
 623static int omap_sham_finish(struct ahash_request *req)
 624{
 625	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 626	struct omap_sham_dev *dd = ctx->dd;
 627	int err = 0;
 628
 629	if (ctx->digcnt) {
 630		omap_sham_copy_ready_hash(req);
 631		if (ctx->flags & BIT(FLAGS_HMAC))
 632			err = omap_sham_finish_hmac(req);
 633	}
 634
 635	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);
 636
 637	return err;
 638}
 639
 640static void omap_sham_finish_req(struct ahash_request *req, int err)
 641{
 642	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 643	struct omap_sham_dev *dd = ctx->dd;
 644
 645	if (!err) {
 646		omap_sham_copy_hash(req, 1);
 647		if (test_bit(FLAGS_FINAL, &dd->flags))
 648			err = omap_sham_finish(req);
 649	} else {
 650		ctx->flags |= BIT(FLAGS_ERROR);
 651	}
 652
 653	/* atomic operation is not needed here */
 654	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
 655			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
 656	clk_disable(dd->iclk);
 657
 658	if (req->base.complete)
 659		req->base.complete(&req->base, err);
 660
 661	/* handle new request */
 662	tasklet_schedule(&dd->done_task);
 663}
 664
 665static int omap_sham_handle_queue(struct omap_sham_dev *dd,
 666				  struct ahash_request *req)
 667{
 668	struct crypto_async_request *async_req, *backlog;
 669	struct omap_sham_reqctx *ctx;
 670	unsigned long flags;
 671	int err = 0, ret = 0;
 672
 673	spin_lock_irqsave(&dd->lock, flags);
 674	if (req)
 675		ret = ahash_enqueue_request(&dd->queue, req);
 676	if (test_bit(FLAGS_BUSY, &dd->flags)) {
 677		spin_unlock_irqrestore(&dd->lock, flags);
 678		return ret;
 679	}
 680	backlog = crypto_get_backlog(&dd->queue);
 681	async_req = crypto_dequeue_request(&dd->queue);
 682	if (async_req)
 683		set_bit(FLAGS_BUSY, &dd->flags);
 684	spin_unlock_irqrestore(&dd->lock, flags);
 685
 686	if (!async_req)
 687		return ret;
 688
 689	if (backlog)
 690		backlog->complete(backlog, -EINPROGRESS);
 691
 692	req = ahash_request_cast(async_req);
 693	dd->req = req;
 694	ctx = ahash_request_ctx(req);
 695
 696	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
 697						ctx->op, req->nbytes);
 698
 699	err = omap_sham_hw_init(dd);
 700	if (err)
 701		goto err1;
 702
 703	omap_set_dma_dest_params(dd->dma_lch, 0,
 704			OMAP_DMA_AMODE_CONSTANT,
 705			dd->phys_base + SHA_REG_DIN(0), 0, 16);
 706
 707	omap_set_dma_dest_burst_mode(dd->dma_lch,
 708			OMAP_DMA_DATA_BURST_16);
 709
 710	omap_set_dma_src_burst_mode(dd->dma_lch,
 711			OMAP_DMA_DATA_BURST_4);
 712
 713	if (ctx->digcnt)
 714		/* request has changed - restore hash */
 715		omap_sham_copy_hash(req, 0);
 716
 717	if (ctx->op == OP_UPDATE) {
 718		err = omap_sham_update_req(dd);
 719		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
 720			/* no final() after finup() */
 721			err = omap_sham_final_req(dd);
 722	} else if (ctx->op == OP_FINAL) {
 723		err = omap_sham_final_req(dd);
 724	}
 725err1:
 726	if (err != -EINPROGRESS)
 727		/* done_task will not finish it, so do it here */
 728		omap_sham_finish_req(req, err);
 729
 730	dev_dbg(dd->dev, "exit, err: %d\n", err);
 731
 732	return ret;
 733}
 734
 735static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
 736{
 737	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 738	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 739	struct omap_sham_dev *dd = tctx->dd;
 740
 741	ctx->op = op;
 742
 743	return omap_sham_handle_queue(dd, req);
 744}
 745
 746static int omap_sham_update(struct ahash_request *req)
 747{
 748	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 
 749
 750	if (!req->nbytes)
 751		return 0;
 752
 753	ctx->total = req->nbytes;
 754	ctx->sg = req->src;
 755	ctx->offset = 0;
 756
 757	if (ctx->flags & BIT(FLAGS_FINUP)) {
 758		if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
 759			/*
 760			* OMAP HW accel works only with buffers >= 9
 761			* will switch to bypass in final()
 762			* final has the same request and data
 763			*/
 764			omap_sham_append_sg(ctx);
 765			return 0;
 766		} else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) {
 767			/*
 768			* faster to use CPU for short transfers
 769			*/
 770			ctx->flags |= BIT(FLAGS_CPU);
 771		}
 772	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
 773		omap_sham_append_sg(ctx);
 774		return 0;
 775	}
 776
 777	return omap_sham_enqueue(req, OP_UPDATE);
 778}
 779
 780static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags,
 781				  const u8 *data, unsigned int len, u8 *out)
 782{
 783	struct {
 784		struct shash_desc shash;
 785		char ctx[crypto_shash_descsize(shash)];
 786	} desc;
 787
 788	desc.shash.tfm = shash;
 789	desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;
 790
 791	return crypto_shash_digest(&desc.shash, data, len, out);
 792}
 793
 794static int omap_sham_final_shash(struct ahash_request *req)
 795{
 796	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
 797	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 
 798
 799	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
 800				      ctx->buffer, ctx->bufcnt, req->result);
 801}
 802
 803static int omap_sham_final(struct ahash_request *req)
 804{
 805	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 806
 807	ctx->flags |= BIT(FLAGS_FINUP);
 808
 809	if (ctx->flags & BIT(FLAGS_ERROR))
 810		return 0; /* uncompleted hash is not needed */
 811
 812	/* OMAP HW accel works only with buffers >= 9 */
 813	/* HMAC is always >= 9 because ipad == block size */
 814	if ((ctx->digcnt + ctx->bufcnt) < 9)
 815		return omap_sham_final_shash(req);
 816	else if (ctx->bufcnt)
 817		return omap_sham_enqueue(req, OP_FINAL);
 818
 819	/* copy ready hash (+ finalize hmac) */
 820	return omap_sham_finish(req);
 821}
 822
 823static int omap_sham_finup(struct ahash_request *req)
 824{
 825	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
 826	int err1, err2;
 827
 828	ctx->flags |= BIT(FLAGS_FINUP);
 829
 830	err1 = omap_sham_update(req);
 831	if (err1 == -EINPROGRESS || err1 == -EBUSY)
 832		return err1;
 833	/*
 834	 * final() always has to be called to clean up resources,
 835	 * even if update() failed, except for -EINPROGRESS
 836	 */
 837	err2 = omap_sham_final(req);
 838
 839	return err1 ?: err2;
 840}
 841
 842static int omap_sham_digest(struct ahash_request *req)
 843{
 844	return omap_sham_init(req) ?: omap_sham_finup(req);
 845}
 846
 847static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
 848		      unsigned int keylen)
 849{
 850	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
 851	struct omap_sham_hmac_ctx *bctx = tctx->base;
 852	int bs = crypto_shash_blocksize(bctx->shash);
 853	int ds = crypto_shash_digestsize(bctx->shash);
 854	int err, i;
 
 855	err = crypto_shash_setkey(tctx->fallback, key, keylen);
 856	if (err)
 857		return err;
 858
 859	if (keylen > bs) {
 860		err = omap_sham_shash_digest(bctx->shash,
 861				crypto_shash_get_flags(bctx->shash),
 862				key, keylen, bctx->ipad);
 863		if (err)
 864			return err;
 865		keylen = ds;
 866	} else {
 867		memcpy(bctx->ipad, key, keylen);
 868	}
 869
 870	memset(bctx->ipad + keylen, 0, bs - keylen);
 871	memcpy(bctx->opad, bctx->ipad, bs);
 872
 873	for (i = 0; i < bs; i++) {
 874		bctx->ipad[i] ^= 0x36;
 875		bctx->opad[i] ^= 0x5c;
 876	}
 877
 878	return err;
 879}
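/*
 * For reference, the ipad/opad buffers prepared above implement the
 * standard HMAC construction (RFC 2104):
 *
 *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * where K' is the key zero-padded to the block size (or first hashed if it
 * is longer than one block), and 0x36/0x5c are the ipad/opad pad bytes.
 */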
 880
 881static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
 882{
 883	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
 884	const char *alg_name = crypto_tfm_alg_name(tfm);
 885
 886	/* Allocate a fallback and abort if it failed. */
 887	tctx->fallback = crypto_alloc_shash(alg_name, 0,
 888					    CRYPTO_ALG_NEED_FALLBACK);
 889	if (IS_ERR(tctx->fallback)) {
 890		pr_err("omap-sham: fallback driver '%s' "
 891				"could not be loaded.\n", alg_name);
 892		return PTR_ERR(tctx->fallback);
 893	}
 894
 895	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 896				 sizeof(struct omap_sham_reqctx) + BUFLEN);
 897
 898	if (alg_base) {
 899		struct omap_sham_hmac_ctx *bctx = tctx->base;
 900		tctx->flags |= BIT(FLAGS_HMAC);
 901		bctx->shash = crypto_alloc_shash(alg_base, 0,
 902						CRYPTO_ALG_NEED_FALLBACK);
 903		if (IS_ERR(bctx->shash)) {
 904			pr_err("omap-sham: base driver '%s' "
 905					"could not be loaded.\n", alg_base);
 906			crypto_free_shash(tctx->fallback);
 907			return PTR_ERR(bctx->shash);
 908		}
 909
 910	}
 911
 912	return 0;
 913}
 914
 915static int omap_sham_cra_init(struct crypto_tfm *tfm)
 916{
 917	return omap_sham_cra_init_alg(tfm, NULL);
 918}
 919
 920static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
 921{
 922	return omap_sham_cra_init_alg(tfm, "sha1");
 923}
 924
 925static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
 926{
 927	return omap_sham_cra_init_alg(tfm, "md5");
 928}
 929
 930static void omap_sham_cra_exit(struct crypto_tfm *tfm)
 931{
 932	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
 933
 934	crypto_free_shash(tctx->fallback);
 935	tctx->fallback = NULL;
 936
 937	if (tctx->flags & BIT(FLAGS_HMAC)) {
 938		struct omap_sham_hmac_ctx *bctx = tctx->base;
 939		crypto_free_shash(bctx->shash);
 940	}
 941}
 942
 943static struct ahash_alg algs[] = {
 944{
 945	.init		= omap_sham_init,
 946	.update		= omap_sham_update,
 947	.final		= omap_sham_final,
 948	.finup		= omap_sham_finup,
 949	.digest		= omap_sham_digest,
 950	.halg.digestsize	= SHA1_DIGEST_SIZE,
 951	.halg.base	= {
 952		.cra_name		= "sha1",
 953		.cra_driver_name	= "omap-sha1",
 954		.cra_priority		= 100,
 955		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 956						CRYPTO_ALG_ASYNC |
 957						CRYPTO_ALG_NEED_FALLBACK,
 958		.cra_blocksize		= SHA1_BLOCK_SIZE,
 959		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
 960		.cra_alignmask		= 0,
 961		.cra_module		= THIS_MODULE,
 962		.cra_init		= omap_sham_cra_init,
 963		.cra_exit		= omap_sham_cra_exit,
 964	}
 
 965},
 966{
 967	.init		= omap_sham_init,
 968	.update		= omap_sham_update,
 969	.final		= omap_sham_final,
 970	.finup		= omap_sham_finup,
 971	.digest		= omap_sham_digest,
 972	.halg.digestsize	= MD5_DIGEST_SIZE,
 973	.halg.base	= {
 974		.cra_name		= "md5",
 975		.cra_driver_name	= "omap-md5",
 976		.cra_priority		= 100,
 977		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
 978						CRYPTO_ALG_ASYNC |
 979						CRYPTO_ALG_NEED_FALLBACK,
 980		.cra_blocksize		= SHA1_BLOCK_SIZE,
 981		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
 982		.cra_alignmask		= OMAP_ALIGN_MASK,
 983		.cra_module		= THIS_MODULE,
 984		.cra_init		= omap_sham_cra_init,
 985		.cra_exit		= omap_sham_cra_exit,
 986	}
 
 987},
 988{
 989	.init		= omap_sham_init,
 990	.update		= omap_sham_update,
 991	.final		= omap_sham_final,
 992	.finup		= omap_sham_finup,
 993	.digest		= omap_sham_digest,
 994	.setkey		= omap_sham_setkey,
 995	.halg.digestsize	= SHA1_DIGEST_SIZE,
 996	.halg.base	= {
 997		.cra_name		= "hmac(sha1)",
 998		.cra_driver_name	= "omap-hmac-sha1",
 999		.cra_priority		= 100,
1000		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1001						CRYPTO_ALG_ASYNC |
1002						CRYPTO_ALG_NEED_FALLBACK,
1003		.cra_blocksize		= SHA1_BLOCK_SIZE,
1004		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1005					sizeof(struct omap_sham_hmac_ctx),
1006		.cra_alignmask		= OMAP_ALIGN_MASK,
1007		.cra_module		= THIS_MODULE,
1008		.cra_init		= omap_sham_cra_sha1_init,
1009		.cra_exit		= omap_sham_cra_exit,
1010	}
 
1011},
1012{
1013	.init		= omap_sham_init,
1014	.update		= omap_sham_update,
1015	.final		= omap_sham_final,
1016	.finup		= omap_sham_finup,
1017	.digest		= omap_sham_digest,
1018	.setkey		= omap_sham_setkey,
1019	.halg.digestsize	= MD5_DIGEST_SIZE,
1020	.halg.base	= {
1021		.cra_name		= "hmac(md5)",
1022		.cra_driver_name	= "omap-hmac-md5",
1023		.cra_priority		= 100,
1024		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1025						CRYPTO_ALG_ASYNC |
1026						CRYPTO_ALG_NEED_FALLBACK,
1027		.cra_blocksize		= SHA1_BLOCK_SIZE,
1028		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
1029					sizeof(struct omap_sham_hmac_ctx),
1030		.cra_alignmask		= OMAP_ALIGN_MASK,
1031		.cra_module		= THIS_MODULE,
1032		.cra_init		= omap_sham_cra_md5_init,
1033		.cra_exit		= omap_sham_cra_exit,
1034	}
 
1035}
1036};
1037
1038static void omap_sham_done_task(unsigned long data)
1039{
1040	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
1041	int err = 0;
1042
1043	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1044		omap_sham_handle_queue(dd, NULL);
1045		return;
1046	}
1047
1048	if (test_bit(FLAGS_CPU, &dd->flags)) {
1049		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
1050			goto finish;
1051	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
1052		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
1053			omap_sham_update_dma_stop(dd);
1054			if (dd->err) {
1055				err = dd->err;
1056				goto finish;
1057			}
1058		}
1059		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
1060			/* hash or semi-hash ready */
1061			clear_bit(FLAGS_DMA_READY, &dd->flags);
1062			err = omap_sham_update_dma_start(dd);
1063			if (err != -EINPROGRESS)
1064				goto finish;
1065		}
1066	}
1067
1068	return;
1069
1070finish:
1071	dev_dbg(dd->dev, "update done: err: %d\n", err);
 1072	/* finish current request */
1073	omap_sham_finish_req(dd->req, err);
1074}
1075
1076static irqreturn_t omap_sham_irq(int irq, void *dev_id)
1077{
1078	struct omap_sham_dev *dd = dev_id;
1079
1080	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
1081		/* final -> allow device to go to power-saving mode */
1082		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
1083
1084	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
1085				 SHA_REG_CTRL_OUTPUT_READY);
1086	omap_sham_read(dd, SHA_REG_CTRL);
1087
1088	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
1089		dev_warn(dd->dev, "Interrupt when no active requests.\n");
1090		return IRQ_HANDLED;
1091	}
1092
1093	set_bit(FLAGS_OUTPUT_READY, &dd->flags);
1094	tasklet_schedule(&dd->done_task);
1095
1096	return IRQ_HANDLED;
1097}
 
1098
1099static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
 
1100{
1101	struct omap_sham_dev *dd = data;
1102
1103	if (ch_status != OMAP_DMA_BLOCK_IRQ) {
1104		pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
1105		dd->err = -EIO;
1106		clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
1107	}
1108
1109	set_bit(FLAGS_DMA_READY, &dd->flags);
1110	tasklet_schedule(&dd->done_task);
1111}
1112
1113static int omap_sham_dma_init(struct omap_sham_dev *dd)
 
1114{
1115	int err;
1116
1117	dd->dma_lch = -1;
1118
1119	err = omap_request_dma(dd->dma, dev_name(dd->dev),
1120			omap_sham_dma_callback, dd, &dd->dma_lch);
1121	if (err) {
1122		dev_err(dd->dev, "Unable to request DMA channel\n");
1123		return err;
1124	}
1125
1126	return 0;
1127}
1128
1129static void omap_sham_dma_cleanup(struct omap_sham_dev *dd)
1130{
1131	if (dd->dma_lch >= 0) {
1132		omap_free_dma(dd->dma_lch);
1133		dd->dma_lch = -1;
1134	}
1135}
1136
1137static int __devinit omap_sham_probe(struct platform_device *pdev)
1138{
1139	struct omap_sham_dev *dd;
1140	struct device *dev = &pdev->dev;
1141	struct resource *res;
1142	int err, i, j;
 
1143
1144	dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL);
1145	if (dd == NULL) {
1146		dev_err(dev, "unable to alloc data struct.\n");
1147		err = -ENOMEM;
1148		goto data_err;
1149	}
1150	dd->dev = dev;
1151	platform_set_drvdata(pdev, dd);
1152
1153	INIT_LIST_HEAD(&dd->list);
1154	spin_lock_init(&dd->lock);
1155	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
1156	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
1157
1158	dd->irq = -1;
1159
1160	/* Get the base address */
1161	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1162	if (!res) {
1163		dev_err(dev, "no MEM resource info\n");
1164		err = -ENODEV;
1165		goto res_err;
1166	}
1167	dd->phys_base = res->start;
1168
1169	/* Get the DMA */
1170	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
1171	if (!res) {
1172		dev_err(dev, "no DMA resource info\n");
1173		err = -ENODEV;
1174		goto res_err;
1175	}
1176	dd->dma = res->start;
1177
1178	/* Get the IRQ */
1179	dd->irq = platform_get_irq(pdev,  0);
1180	if (dd->irq < 0) {
1181		dev_err(dev, "no IRQ resource info\n");
1182		err = dd->irq;
1183		goto res_err;
1184	}
1185
1186	err = request_irq(dd->irq, omap_sham_irq,
1187			IRQF_TRIGGER_LOW, dev_name(dev), dd);
1188	if (err) {
1189		dev_err(dev, "unable to request irq.\n");
1190		goto res_err;
1191	}
1192
1193	err = omap_sham_dma_init(dd);
1194	if (err)
1195		goto dma_err;
1196
1197	/* Initializing the clock */
1198	dd->iclk = clk_get(dev, "ick");
1199	if (IS_ERR(dd->iclk)) {
 1200		dev_err(dev, "clock initialization failed.\n");
1201		err = PTR_ERR(dd->iclk);
1202		goto clk_err;
1203	}
1204
1205	dd->io_base = ioremap(dd->phys_base, SZ_4K);
1206	if (!dd->io_base) {
1207		dev_err(dev, "can't ioremap\n");
1208		err = -ENOMEM;
1209		goto io_err;
1210	}
1211
1212	clk_enable(dd->iclk);
1213	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
1214		(omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MAJOR) >> 4,
1215		omap_sham_read(dd, SHA_REG_REV) & SHA_REG_REV_MINOR);
1216	clk_disable(dd->iclk);
1217
1218	spin_lock(&sham.lock);
1219	list_add_tail(&dd->list, &sham.dev_list);
1220	spin_unlock(&sham.lock);
1221
1222	for (i = 0; i < ARRAY_SIZE(algs); i++) {
1223		err = crypto_register_ahash(&algs[i]);
1224		if (err)
1225			goto err_algs;
1226	}
1227
1228	return 0;
1229
1230err_algs:
1231	for (j = 0; j < i; j++)
1232		crypto_unregister_ahash(&algs[j]);
1233	iounmap(dd->io_base);
1234io_err:
1235	clk_put(dd->iclk);
1236clk_err:
1237	omap_sham_dma_cleanup(dd);
1238dma_err:
1239	if (dd->irq >= 0)
1240		free_irq(dd->irq, dd);
1241res_err:
1242	kfree(dd);
1243	dd = NULL;
1244data_err:
1245	dev_err(dev, "initialization failed.\n");
1246
1247	return err;
1248}
1249
1250static int __devexit omap_sham_remove(struct platform_device *pdev)
1251{
1252	static struct omap_sham_dev *dd;
1253	int i;
1254
1255	dd = platform_get_drvdata(pdev);
1256	if (!dd)
1257		return -ENODEV;
1258	spin_lock(&sham.lock);
1259	list_del(&dd->list);
1260	spin_unlock(&sham.lock);
1261	for (i = 0; i < ARRAY_SIZE(algs); i++)
1262		crypto_unregister_ahash(&algs[i]);
1263	tasklet_kill(&dd->done_task);
1264	iounmap(dd->io_base);
1265	clk_put(dd->iclk);
1266	omap_sham_dma_cleanup(dd);
1267	if (dd->irq >= 0)
1268		free_irq(dd->irq, dd);
1269	kfree(dd);
1270	dd = NULL;
1271
1272	return 0;
1273}
1274
1275static struct platform_driver omap_sham_driver = {
1276	.probe	= omap_sham_probe,
1277	.remove	= omap_sham_remove,
1278	.driver	= {
1279		.name	= "omap-sham",
1280		.owner	= THIS_MODULE,
1281	},
1282};
1283
1284static int __init omap_sham_mod_init(void)
1285{
1286	pr_info("loading %s driver\n", "omap-sham");
1287
1288	if (!cpu_class_is_omap2() ||
1289		(omap_type() != OMAP2_DEVICE_TYPE_SEC &&
1290			omap_type() != OMAP2_DEVICE_TYPE_EMU)) {
1291		pr_err("Unsupported cpu\n");
1292		return -ENODEV;
1293	}
1294
1295	return platform_driver_register(&omap_sham_driver);
1296}
1297
1298static void __exit omap_sham_mod_exit(void)
1299{
1300	platform_driver_unregister(&omap_sham_driver);
1301}
1302
1303module_init(omap_sham_mod_init);
1304module_exit(omap_sham_mod_exit);
1305
1306MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
1307MODULE_LICENSE("GPL v2");
1308MODULE_AUTHOR("Dmitry Kasatkin");