   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for SAHARA cryptographic accelerator.
   6 *
   7 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
   8 * Copyright (c) 2013 Vista Silicon S.L.
   9 * Author: Javier Martin <javier.martin@vista-silicon.com>
  10 *
  11 * Based on omap-aes.c and tegra-aes.c
  12 */
  13
  14#include <crypto/aes.h>
  15#include <crypto/internal/hash.h>
  16#include <crypto/internal/skcipher.h>
  17#include <crypto/scatterwalk.h>
  18#include <crypto/engine.h>
  19#include <crypto/sha1.h>
  20#include <crypto/sha2.h>
  21
  22#include <linux/clk.h>
  23#include <linux/dma-mapping.h>
  24#include <linux/interrupt.h>
  25#include <linux/io.h>
  26#include <linux/irq.h>
  27#include <linux/kernel.h>
  28#include <linux/module.h>
  29#include <linux/of.h>
  30#include <linux/platform_device.h>
  31#include <linux/spinlock.h>
  32
  33#define SHA_BUFFER_LEN				PAGE_SIZE
  34#define SAHARA_MAX_SHA_BLOCK_SIZE		SHA256_BLOCK_SIZE
  35
  36#define SAHARA_NAME				"sahara"
  37#define SAHARA_VERSION_3			3
  38#define SAHARA_VERSION_4			4
  39#define SAHARA_TIMEOUT_MS			1000
  40#define SAHARA_MAX_HW_DESC			2
  41#define SAHARA_MAX_HW_LINK			20
  42
  43#define FLAGS_MODE_MASK				0x000f
  44#define FLAGS_ENCRYPT				BIT(0)
  45#define FLAGS_CBC				BIT(1)
  46
  47#define SAHARA_HDR_BASE				0x00800000
  48#define SAHARA_HDR_SKHA_ALG_AES			0
  49#define SAHARA_HDR_SKHA_MODE_ECB		0
  50#define SAHARA_HDR_SKHA_OP_ENC			BIT(2)
  51#define SAHARA_HDR_SKHA_MODE_CBC		BIT(3)
  52#define SAHARA_HDR_FORM_DATA			(5 << 16)
  53#define SAHARA_HDR_FORM_KEY			BIT(19)
  54#define SAHARA_HDR_LLO				BIT(24)
  55#define SAHARA_HDR_CHA_SKHA			BIT(28)
  56#define SAHARA_HDR_CHA_MDHA			BIT(29)
  57#define SAHARA_HDR_PARITY_BIT			BIT(31)
  58
  59#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY		0x20880000
  60#define SAHARA_HDR_MDHA_SET_MODE_HASH		0x208D0000
  61#define SAHARA_HDR_MDHA_HASH			0xA0850000
  62#define SAHARA_HDR_MDHA_STORE_DIGEST		0x20820000
  63#define SAHARA_HDR_MDHA_ALG_SHA1		0
  64#define SAHARA_HDR_MDHA_ALG_MD5			1
  65#define SAHARA_HDR_MDHA_ALG_SHA256		2
  66#define SAHARA_HDR_MDHA_ALG_SHA224		3
  67#define SAHARA_HDR_MDHA_PDATA			BIT(2)
  68#define SAHARA_HDR_MDHA_HMAC			BIT(3)
  69#define SAHARA_HDR_MDHA_INIT			BIT(5)
  70#define SAHARA_HDR_MDHA_IPAD			BIT(6)
  71#define SAHARA_HDR_MDHA_OPAD			BIT(7)
  72#define SAHARA_HDR_MDHA_SWAP			BIT(8)
  73#define SAHARA_HDR_MDHA_MAC_FULL		BIT(9)
  74#define SAHARA_HDR_MDHA_SSL			BIT(10)
  75
  76#define SAHARA_REG_VERSION			0x00
  77#define SAHARA_REG_DAR				0x04
  78#define SAHARA_REG_CONTROL			0x08
  79#define SAHARA_CONTROL_SET_THROTTLE(x)		(((x) & 0xff) << 24)
  80#define SAHARA_CONTROL_SET_MAXBURST(x)		(((x) & 0xff) << 16)
  81#define SAHARA_CONTROL_RNG_AUTORSD		BIT(7)
  82#define SAHARA_CONTROL_ENABLE_INT		BIT(4)
  83#define SAHARA_REG_CMD				0x0C
  84#define SAHARA_CMD_RESET			BIT(0)
  85#define SAHARA_CMD_CLEAR_INT			BIT(8)
  86#define SAHARA_CMD_CLEAR_ERR			BIT(9)
  87#define SAHARA_CMD_SINGLE_STEP			BIT(10)
  88#define SAHARA_CMD_MODE_BATCH			BIT(16)
  89#define SAHARA_CMD_MODE_DEBUG			BIT(18)
  90#define SAHARA_REG_STATUS			0x10
  91#define SAHARA_STATUS_GET_STATE(x)		((x) & 0x7)
  92#define SAHARA_STATE_IDLE			0
  93#define SAHARA_STATE_BUSY			1
  94#define SAHARA_STATE_ERR			2
  95#define SAHARA_STATE_FAULT			3
  96#define SAHARA_STATE_COMPLETE			4
  97#define SAHARA_STATE_COMP_FLAG			BIT(2)
  98#define SAHARA_STATUS_DAR_FULL			BIT(3)
  99#define SAHARA_STATUS_ERROR			BIT(4)
 100#define SAHARA_STATUS_SECURE			BIT(5)
 101#define SAHARA_STATUS_FAIL			BIT(6)
 102#define SAHARA_STATUS_INIT			BIT(7)
 103#define SAHARA_STATUS_RNG_RESEED		BIT(8)
 104#define SAHARA_STATUS_ACTIVE_RNG		BIT(9)
 105#define SAHARA_STATUS_ACTIVE_MDHA		BIT(10)
 106#define SAHARA_STATUS_ACTIVE_SKHA		BIT(11)
 107#define SAHARA_STATUS_MODE_BATCH		BIT(16)
 108#define SAHARA_STATUS_MODE_DEDICATED		BIT(17)
 109#define SAHARA_STATUS_MODE_DEBUG		BIT(18)
 110#define SAHARA_STATUS_GET_ISTATE(x)		(((x) >> 24) & 0xff)
 111#define SAHARA_REG_ERRSTATUS			0x14
 112#define SAHARA_ERRSTATUS_GET_SOURCE(x)		((x) & 0xf)
 113#define SAHARA_ERRSOURCE_CHA			14
 114#define SAHARA_ERRSOURCE_DMA			15
 115#define SAHARA_ERRSTATUS_DMA_DIR		BIT(8)
 116#define SAHARA_ERRSTATUS_GET_DMASZ(x)		(((x) >> 9) & 0x3)
 117#define SAHARA_ERRSTATUS_GET_DMASRC(x)		(((x) >> 13) & 0x7)
 118#define SAHARA_ERRSTATUS_GET_CHASRC(x)		(((x) >> 16) & 0xfff)
 119#define SAHARA_ERRSTATUS_GET_CHAERR(x)		(((x) >> 28) & 0x3)
 120#define SAHARA_REG_FADDR			0x18
 121#define SAHARA_REG_CDAR				0x1C
 122#define SAHARA_REG_IDAR				0x20
 123
 124struct sahara_hw_desc {
 125	u32	hdr;
 126	u32	len1;
 127	u32	p1;
 128	u32	len2;
 129	u32	p2;
 130	u32	next;
 131};
 132
 133struct sahara_hw_link {
 134	u32	len;
 135	u32	p;
 136	u32	next;
 137};
 138
 139struct sahara_ctx {
 140	/* AES-specific context */
 141	int keylen;
 142	u8 key[AES_KEYSIZE_128];
 143	struct crypto_skcipher *fallback;
 144};
 145
 146struct sahara_aes_reqctx {
 147	unsigned long mode;
 148	u8 iv_out[AES_BLOCK_SIZE];
 149	struct skcipher_request fallback_req;	// keep at the end
 150};
 151
 152/*
 153 * struct sahara_sha_reqctx - private data per request
 154 * @buf: holds data for requests smaller than block_size
 155 * @rembuf: used to prepare one block_size-aligned request
 156 * @context: hw-specific context for request. Digest is extracted from this
 157 * @mode: specifies what type of hw-descriptor needs to be built
 158 * @digest_size: length of digest for this request
 159 * @context_size: length of hw-context for this request.
 160 *                Always digest_size + 4
 161 * @buf_cnt: number of bytes saved in buf
 162 * @sg_in_idx: number of hw links
 163 * @in_sg: scatterlist for input data
 164 * @in_sg_chain: scatterlists for chained input data
 165 * @total: total number of bytes for transfer
 166 * @last: is this the last block
 167 * @first: is this the first block
 168 */
 169struct sahara_sha_reqctx {
 170	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
 171	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
 172	u8			context[SHA256_DIGEST_SIZE + 4];
 173	unsigned int		mode;
 174	unsigned int		digest_size;
 175	unsigned int		context_size;
 176	unsigned int		buf_cnt;
 177	unsigned int		sg_in_idx;
 178	struct scatterlist	*in_sg;
 179	struct scatterlist	in_sg_chain[2];
 180	size_t			total;
 181	unsigned int		last;
 182	unsigned int		first;
 183};
 184
 185struct sahara_dev {
 186	struct device		*device;
 187	unsigned int		version;
 188	void __iomem		*regs_base;
 189	struct clk		*clk_ipg;
 190	struct clk		*clk_ahb;
 191	struct completion	dma_completion;
 192
 193	struct sahara_ctx	*ctx;
 194	unsigned long		flags;
 195
 196	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
 197	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
 198
 199	u8			*key_base;
 200	dma_addr_t		key_phys_base;
 201
 202	u8			*iv_base;
 203	dma_addr_t		iv_phys_base;
 204
 205	u8			*context_base;
 206	dma_addr_t		context_phys_base;
 207
 208	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
 209	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
 210
 211	size_t			total;
 212	struct scatterlist	*in_sg;
 213	int		nb_in_sg;
 214	struct scatterlist	*out_sg;
 215	int		nb_out_sg;
 216
 217	struct crypto_engine *engine;
 218};
 219
 220static struct sahara_dev *dev_ptr;
 221
 222static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
 223{
 224	writel(data, dev->regs_base + reg);
 225}
 226
 227static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
 228{
 229	return readl(dev->regs_base + reg);
 230}
 231
 232static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
 233{
 234	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
 235			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
 236			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 237
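	/*
	 * The base header above already includes the parity bit, giving the
	 * word odd parity. Each mode bit set below would make the popcount
	 * even again, so the parity bit is toggled alongside it; compare
	 * sahara_sha_init_hdr(), which computes the same property with
	 * hweight_long().
	 */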
 238	if (dev->flags & FLAGS_CBC) {
 239		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
 240		hdr ^= SAHARA_HDR_PARITY_BIT;
 241	}
 242
 243	if (dev->flags & FLAGS_ENCRYPT) {
 244		hdr |= SAHARA_HDR_SKHA_OP_ENC;
 245		hdr ^= SAHARA_HDR_PARITY_BIT;
 246	}
 247
 248	return hdr;
 249}
 250
 251static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
 252{
 253	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
 254			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 255}
 256
 257static const char *sahara_err_src[16] = {
 258	"No error",
 259	"Header error",
 260	"Descriptor length error",
 261	"Descriptor length or pointer error",
 262	"Link length error",
 263	"Link pointer error",
 264	"Input buffer error",
 265	"Output buffer error",
 266	"Output buffer starvation",
 267	"Internal state fault",
 268	"General descriptor problem",
 269	"Reserved",
 270	"Descriptor address error",
 271	"Link address error",
 272	"CHA error",
 273	"DMA error"
 274};
 275
 276static const char *sahara_err_dmasize[4] = {
 277	"Byte transfer",
 278	"Half-word transfer",
 279	"Word transfer",
 280	"Reserved"
 281};
 282
 283static const char *sahara_err_dmasrc[8] = {
 284	"No error",
 285	"AHB bus error",
 286	"Internal IP bus error",
 287	"Parity error",
 288	"DMA crosses 256 byte boundary",
 289	"DMA is busy",
 290	"Reserved",
 291	"DMA HW error"
 292};
 293
 294static const char *sahara_cha_errsrc[12] = {
 295	"Input buffer non-empty",
 296	"Illegal address",
 297	"Illegal mode",
 298	"Illegal data size",
 299	"Illegal key size",
 300	"Write during processing",
 301	"CTX read during processing",
 302	"HW error",
 303	"Input buffer disabled/underflow",
 304	"Output buffer disabled/overflow",
 305	"DES key parity error",
 306	"Reserved"
 307};
 308
 309static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
 310
 311static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
 312{
 313	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
 314	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
 315
 316	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
 317
 318	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
 319
 320	if (source == SAHARA_ERRSOURCE_DMA) {
 321		if (error & SAHARA_ERRSTATUS_DMA_DIR)
 322			dev_err(dev->device, "		* DMA read.\n");
 323		else
 324			dev_err(dev->device, "		* DMA write.\n");
 325
 326		dev_err(dev->device, "		* %s.\n",
 327		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
 328		dev_err(dev->device, "		* %s.\n",
 329		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
 330	} else if (source == SAHARA_ERRSOURCE_CHA) {
 331		dev_err(dev->device, "		* %s.\n",
 332			sahara_cha_errsrc[chasrc]);
 333		dev_err(dev->device, "		* %s.\n",
 334		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
 335	}
 336	dev_err(dev->device, "\n");
 337}
 338
 339static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
 340
 341static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
 342{
 343	u8 state;
 344
 345	if (!__is_defined(DEBUG))
 346		return;
 347
 348	state = SAHARA_STATUS_GET_STATE(status);
 349
 350	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
 351		__func__, status);
 352
 353	dev_dbg(dev->device, "	- State = %d:\n", state);
 354	if (state & SAHARA_STATE_COMP_FLAG)
 355		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
 356
 357	dev_dbg(dev->device, "		* %s.\n",
 358	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
 359
 360	if (status & SAHARA_STATUS_DAR_FULL)
 361		dev_dbg(dev->device, "	- DAR Full.\n");
 362	if (status & SAHARA_STATUS_ERROR)
 363		dev_dbg(dev->device, "	- Error.\n");
 364	if (status & SAHARA_STATUS_SECURE)
 365		dev_dbg(dev->device, "	- Secure.\n");
 366	if (status & SAHARA_STATUS_FAIL)
 367		dev_dbg(dev->device, "	- Fail.\n");
 368	if (status & SAHARA_STATUS_RNG_RESEED)
 369		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
 370	if (status & SAHARA_STATUS_ACTIVE_RNG)
 371		dev_dbg(dev->device, "	- RNG Active.\n");
 372	if (status & SAHARA_STATUS_ACTIVE_MDHA)
 373		dev_dbg(dev->device, "	- MDHA Active.\n");
 374	if (status & SAHARA_STATUS_ACTIVE_SKHA)
 375		dev_dbg(dev->device, "	- SKHA Active.\n");
 376
 377	if (status & SAHARA_STATUS_MODE_BATCH)
 378		dev_dbg(dev->device, "	- Batch Mode.\n");
 379	else if (status & SAHARA_STATUS_MODE_DEDICATED)
 380		dev_dbg(dev->device, "	- Dedicated Mode.\n");
 381	else if (status & SAHARA_STATUS_MODE_DEBUG)
 382		dev_dbg(dev->device, "	- Debug Mode.\n");
 383
 384	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
 385	       SAHARA_STATUS_GET_ISTATE(status));
 386
 387	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
 388		sahara_read(dev, SAHARA_REG_CDAR));
 389	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
 390		sahara_read(dev, SAHARA_REG_IDAR));
 391}
 392
 393static void sahara_dump_descriptors(struct sahara_dev *dev)
 394{
 395	int i;
 396
 397	if (!__is_defined(DEBUG))
 398		return;
 399
 400	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
 401		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
 402			i, &dev->hw_phys_desc[i]);
 403		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
 404		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
 405		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
 406		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
 407		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
 408		dev_dbg(dev->device, "\tnext = 0x%08x\n",
 409			dev->hw_desc[i]->next);
 410	}
 411	dev_dbg(dev->device, "\n");
 412}
 413
 414static void sahara_dump_links(struct sahara_dev *dev)
 415{
 416	int i;
 417
 418	if (!__is_defined(DEBUG))
 419		return;
 420
 421	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
 422		dev_dbg(dev->device, "Link (%d) (%pad):\n",
 423			i, &dev->hw_phys_link[i]);
 424		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
 425		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
 426		dev_dbg(dev->device, "\tnext = 0x%08x\n",
 427			dev->hw_link[i]->next);
 428	}
 429	dev_dbg(dev->device, "\n");
 430}
 431
 432static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 433{
 434	struct sahara_ctx *ctx = dev->ctx;
 435	struct scatterlist *sg;
 436	int ret;
 437	int i, j;
 438	int idx = 0;
 439	u32 len;
 440
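	/*
	 * Two chained hw descriptors are used per AES request: hw_desc[0]
	 * loads the key (and the IV for CBC) into the SKHA unit, and
	 * hw_desc[1], filled in below, points at the input/output link
	 * lists for the actual data transfer.
	 */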
 441	memcpy(dev->key_base, ctx->key, ctx->keylen);
 442
 443	if (dev->flags & FLAGS_CBC) {
 444		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
 445		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
 446	} else {
 447		dev->hw_desc[idx]->len1 = 0;
 448		dev->hw_desc[idx]->p1 = 0;
 449	}
 450	dev->hw_desc[idx]->len2 = ctx->keylen;
 451	dev->hw_desc[idx]->p2 = dev->key_phys_base;
 452	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
 453	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
 454
 455	idx++;
 456
 457
 458	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
 459	if (dev->nb_in_sg < 0) {
  460		dev_err(dev->device, "Invalid number of src SG.\n");
 461		return dev->nb_in_sg;
 462	}
 463	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
 464	if (dev->nb_out_sg < 0) {
  465		dev_err(dev->device, "Invalid number of dst SG.\n");
 466		return dev->nb_out_sg;
 467	}
 468	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
 469		dev_err(dev->device, "not enough hw links (%d)\n",
 470			dev->nb_in_sg + dev->nb_out_sg);
 471		return -EINVAL;
 472	}
 473
 474	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 475			 DMA_TO_DEVICE);
 476	if (!ret) {
 477		dev_err(dev->device, "couldn't map in sg\n");
 478		return -EINVAL;
 479	}
 480
 481	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 482			 DMA_FROM_DEVICE);
 483	if (!ret) {
 484		dev_err(dev->device, "couldn't map out sg\n");
 485		goto unmap_in;
 486	}
 487
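	/*
	 * Each hw link describes one contiguous DMA segment. The lengths
	 * are clamped with min() so that a final scatterlist entry larger
	 * than the remaining request length does not overrun dev->total,
	 * and the last link terminates its chain with next = 0.
	 */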
 488	/* Create input links */
 489	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
 490	sg = dev->in_sg;
 491	len = dev->total;
 492	for (i = 0; i < dev->nb_in_sg; i++) {
 493		dev->hw_link[i]->len = min(len, sg->length);
 494		dev->hw_link[i]->p = sg->dma_address;
 495		if (i == (dev->nb_in_sg - 1)) {
 496			dev->hw_link[i]->next = 0;
 497		} else {
 498			len -= min(len, sg->length);
 499			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 500			sg = sg_next(sg);
 501		}
 502	}
 503
 504	/* Create output links */
 505	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
 506	sg = dev->out_sg;
 507	len = dev->total;
 508	for (j = i; j < dev->nb_out_sg + i; j++) {
 509		dev->hw_link[j]->len = min(len, sg->length);
 510		dev->hw_link[j]->p = sg->dma_address;
 511		if (j == (dev->nb_out_sg + i - 1)) {
 512			dev->hw_link[j]->next = 0;
 513		} else {
 514			len -= min(len, sg->length);
 515			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
 516			sg = sg_next(sg);
 517		}
 518	}
 519
 520	/* Fill remaining fields of hw_desc[1] */
 521	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
 522	dev->hw_desc[idx]->len1 = dev->total;
 523	dev->hw_desc[idx]->len2 = dev->total;
 524	dev->hw_desc[idx]->next = 0;
 525
 526	sahara_dump_descriptors(dev);
 527	sahara_dump_links(dev);
 528
 529	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
 530
 531	return 0;
 532
 533unmap_in:
 534	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 535		DMA_TO_DEVICE);
 536
 537	return -EINVAL;
 538}
 539
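/*
 * Standard CBC chaining: the IV for the next request must be the last
 * ciphertext block of this one. For encryption that block sits at the tail
 * of req->dst; for decryption it was saved from req->src into iv_out before
 * the (possibly in-place) operation overwrote it.
 */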
 540static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
 541{
 542	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 543	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 544	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 545
 546	/* Update IV buffer to contain the last ciphertext block */
 547	if (rctx->mode & FLAGS_ENCRYPT) {
 548		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
 549				   ivsize, req->cryptlen - ivsize);
 550	} else {
 551		memcpy(req->iv, rctx->iv_out, ivsize);
 552	}
 553}
 554
 555static int sahara_aes_process(struct skcipher_request *req)
 556{
 557	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 558	struct sahara_dev *dev = dev_ptr;
 559	struct sahara_ctx *ctx;
 560	struct sahara_aes_reqctx *rctx;
 561	int ret;
 562	unsigned long timeout;
 563
 564	/* Request is ready to be dispatched by the device */
 565	dev_dbg(dev->device,
 566		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
 567		req->cryptlen, req->src, req->dst);
 568
 569	/* assign new request to device */
 570	dev->total = req->cryptlen;
 571	dev->in_sg = req->src;
 572	dev->out_sg = req->dst;
 573
 574	rctx = skcipher_request_ctx(req);
 575	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 576	rctx->mode &= FLAGS_MODE_MASK;
 577	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 578
 579	if ((dev->flags & FLAGS_CBC) && req->iv) {
 580		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 581
 582		memcpy(dev->iv_base, req->iv, ivsize);
 583
 584		if (!(dev->flags & FLAGS_ENCRYPT)) {
 585			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
 586					   rctx->iv_out, ivsize,
 587					   req->cryptlen - ivsize);
 588		}
 589	}
 590
 591	/* assign new context to device */
 592	dev->ctx = ctx;
 593
 594	reinit_completion(&dev->dma_completion);
 595
 596	ret = sahara_hw_descriptor_create(dev);
 597	if (ret)
 598		return -EINVAL;
 599
 600	timeout = wait_for_completion_timeout(&dev->dma_completion,
 601				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
 602
 603	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 604		DMA_FROM_DEVICE);
 605	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 606		DMA_TO_DEVICE);
 607
 608	if (!timeout) {
 609		dev_err(dev->device, "AES timeout\n");
 610		return -ETIMEDOUT;
 611	}
 612
 613	if ((dev->flags & FLAGS_CBC) && req->iv)
 614		sahara_aes_cbc_update_iv(req);
 615
 616	return 0;
 617}
 618
 619static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 620			     unsigned int keylen)
 621{
 622	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
 623
 624	ctx->keylen = keylen;
 625
 626	/* SAHARA only supports 128bit keys */
 627	if (keylen == AES_KEYSIZE_128) {
 628		memcpy(ctx->key, key, keylen);
 629		return 0;
 630	}
 631
 632	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
 633		return -EINVAL;
 634
 635	/*
 636	 * The requested key size is not supported by HW, do a fallback.
 637	 */
 638	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
 639	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
 640						 CRYPTO_TFM_REQ_MASK);
 641	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
 642}
 643
 644static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
 645{
 646	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 647	struct sahara_ctx *ctx = crypto_skcipher_ctx(
 648		crypto_skcipher_reqtfm(req));
 649
 650	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 651	skcipher_request_set_callback(&rctx->fallback_req,
 652				      req->base.flags,
 653				      req->base.complete,
 654				      req->base.data);
 655	skcipher_request_set_crypt(&rctx->fallback_req, req->src,
 656				   req->dst, req->cryptlen, req->iv);
 657
 658	if (mode & FLAGS_ENCRYPT)
 659		return crypto_skcipher_encrypt(&rctx->fallback_req);
 660
 661	return crypto_skcipher_decrypt(&rctx->fallback_req);
 662}
 663
 664static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
 665{
 666	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 667	struct sahara_ctx *ctx = crypto_skcipher_ctx(
 668		crypto_skcipher_reqtfm(req));
 669	struct sahara_dev *dev = dev_ptr;
 670
 671	if (!req->cryptlen)
 672		return 0;
 673
 674	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
 675		return sahara_aes_fallback(req, mode);
 676
 677	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
 678		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
 679
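	/*
	 * ECB and CBC are block modes, so the hardware can only process
	 * multiples of the AES block size; anything else is rejected
	 * rather than silently truncated.
	 */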
 680	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
 681		return -EINVAL;
 682
 683	rctx->mode = mode;
 684
 685	return crypto_transfer_skcipher_request_to_engine(dev->engine, req);
 686}
 687
 688static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
 689{
 690	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
 691}
 692
 693static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
 694{
 695	return sahara_aes_crypt(req, 0);
 696}
 697
 698static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
 699{
 700	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
 701}
 702
 703static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
 704{
 705	return sahara_aes_crypt(req, FLAGS_CBC);
 706}
 707
 708static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
 709{
 710	const char *name = crypto_tfm_alg_name(&tfm->base);
 711	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
 712
 713	ctx->fallback = crypto_alloc_skcipher(name, 0,
 714					      CRYPTO_ALG_NEED_FALLBACK);
 715	if (IS_ERR(ctx->fallback)) {
 716		pr_err("Error allocating fallback algo %s\n", name);
 717		return PTR_ERR(ctx->fallback);
 718	}
 719
 720	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
 721					 crypto_skcipher_reqsize(ctx->fallback));
 722
 723	return 0;
 724}
 725
 726static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
 727{
 728	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
 729
 730	crypto_free_skcipher(ctx->fallback);
 731}
 732
 733static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
 734			      struct sahara_sha_reqctx *rctx)
 735{
 736	u32 hdr = 0;
 737
 738	hdr = rctx->mode;
 739
 740	if (rctx->first) {
 741		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
 742		hdr |= SAHARA_HDR_MDHA_INIT;
 743	} else {
 744		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
 745	}
 746
 747	if (rctx->last)
 748		hdr |= SAHARA_HDR_MDHA_PDATA;
 749
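	/*
	 * The hardware appears to require odd descriptor-header parity,
	 * so set the parity bit whenever the popcount so far is even.
	 */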
 750	if (hweight_long(hdr) % 2 == 0)
 751		hdr |= SAHARA_HDR_PARITY_BIT;
 752
 753	return hdr;
 754}
 755
 756static int sahara_sha_hw_links_create(struct sahara_dev *dev,
 757				       struct sahara_sha_reqctx *rctx,
 758				       int start)
 759{
 760	struct scatterlist *sg;
 761	unsigned int len;
 762	unsigned int i;
 763	int ret;
 764
 765	dev->in_sg = rctx->in_sg;
 766
 767	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
 768	if (dev->nb_in_sg < 0) {
  769		dev_err(dev->device, "Invalid number of src SG.\n");
 770		return dev->nb_in_sg;
 771	}
  772	if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
  773		dev_err(dev->device, "not enough hw links (%d)\n",
  774			dev->nb_in_sg);
 775		return -EINVAL;
 776	}
 777
 778	sg = dev->in_sg;
 779	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
 780	if (!ret)
 781		return -EFAULT;
 782
 783	len = rctx->total;
 784	for (i = start; i < dev->nb_in_sg + start; i++) {
 785		dev->hw_link[i]->len = min(len, sg->length);
 786		dev->hw_link[i]->p = sg->dma_address;
 787		if (i == (dev->nb_in_sg + start - 1)) {
 788			dev->hw_link[i]->next = 0;
 789		} else {
 790			len -= min(len, sg->length);
 791			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 792			sg = sg_next(sg);
 793		}
 794	}
 795
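	/*
	 * Return the index one past the last link used, so callers can
	 * append further links (e.g. the saved context) right behind the
	 * data links.
	 */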
 796	return i;
 797}
 798
 799static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
 800						struct sahara_sha_reqctx *rctx,
 801						struct ahash_request *req,
 802						int index)
 803{
  804	unsigned int result_len;
 805	int i = index;
 806
 807	if (rctx->first)
  808		/* Create initial descriptor: #8 */
 809		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
 810	else
 811		/* Create hash descriptor: #10. Must follow #6. */
 812		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
 813
 814	dev->hw_desc[index]->len1 = rctx->total;
 815	if (dev->hw_desc[index]->len1 == 0) {
 816		/* if len1 is 0, p1 must be 0, too */
 817		dev->hw_desc[index]->p1 = 0;
 818		rctx->sg_in_idx = 0;
 819	} else {
 820		/* Create input links */
 821		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
 822		i = sahara_sha_hw_links_create(dev, rctx, index);
 823
 824		rctx->sg_in_idx = index;
 825		if (i < 0)
 826			return i;
 827	}
 828
 829	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
 830
 831	/* Save the context for the next operation */
 832	result_len = rctx->context_size;
 833	dev->hw_link[i]->p = dev->context_phys_base;
 834
 835	dev->hw_link[i]->len = result_len;
 836	dev->hw_desc[index]->len2 = result_len;
 837
 838	dev->hw_link[i]->next = 0;
 839
 840	return 0;
 841}
 842
 843/*
 844 * Load descriptor aka #6
 845 *
 846 * To load a previously saved context back to the MDHA unit
 847 *
 848 * p1: Saved Context
 849 * p2: NULL
 850 *
 851 */
 852static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
 853						struct sahara_sha_reqctx *rctx,
 854						struct ahash_request *req,
 855						int index)
 856{
 857	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
 858
 859	dev->hw_desc[index]->len1 = rctx->context_size;
 860	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
 861	dev->hw_desc[index]->len2 = 0;
 862	dev->hw_desc[index]->p2 = 0;
 863
 864	dev->hw_link[index]->len = rctx->context_size;
 865	dev->hw_link[index]->p = dev->context_phys_base;
 866	dev->hw_link[index]->next = 0;
 867
 868	return 0;
 869}
 870
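/*
 * Gather request data into block_size-aligned chunks. Only the final
 * transfer may be padded by the hardware, so partial blocks are buffered in
 * rctx->buf until enough data arrives. Returns 0 when the data was merely
 * buffered and no transfer is needed, or -EINPROGRESS when the caller
 * should go ahead and run the hw descriptors (see sahara_sha_process()).
 */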
 871static int sahara_sha_prepare_request(struct ahash_request *req)
 872{
 873	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 874	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
 875	unsigned int hash_later;
 876	unsigned int block_size;
 877	unsigned int len;
 878
 879	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 880
 881	/* append bytes from previous operation */
 882	len = rctx->buf_cnt + req->nbytes;
 883
 884	/* only the last transfer can be padded in hardware */
 885	if (!rctx->last && (len < block_size)) {
  886		/* too little data, save it for the next operation */
 887		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
 888					 0, req->nbytes, 0);
 889		rctx->buf_cnt += req->nbytes;
 890
 891		return 0;
 892	}
 893
 894	/* add data from previous operation first */
 895	if (rctx->buf_cnt)
 896		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
 897
 898	/* data must always be a multiple of block_size */
 899	hash_later = rctx->last ? 0 : len & (block_size - 1);
 900	if (hash_later) {
 901		unsigned int offset = req->nbytes - hash_later;
 902		/* Save remaining bytes for later use */
 903		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
 904					hash_later, 0);
 905	}
 906
 907	rctx->total = len - hash_later;
 908	/* have data from previous operation and current */
 909	if (rctx->buf_cnt && req->nbytes) {
 910		sg_init_table(rctx->in_sg_chain, 2);
 911		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
 912		sg_chain(rctx->in_sg_chain, 2, req->src);
 913		rctx->in_sg = rctx->in_sg_chain;
 914	/* only data from previous operation */
 915	} else if (rctx->buf_cnt) {
 916		rctx->in_sg = rctx->in_sg_chain;
 917		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
 918	/* no data from previous operation */
 919	} else {
 920		rctx->in_sg = req->src;
 921	}
 922
 923	/* on next call, we only have the remaining data in the buffer */
 924	rctx->buf_cnt = hash_later;
 925
 926	return -EINPROGRESS;
 927}
 928
 929static int sahara_sha_process(struct ahash_request *req)
 930{
 931	struct sahara_dev *dev = dev_ptr;
 932	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
 933	int ret;
 934	unsigned long timeout;
 935
 936	ret = sahara_sha_prepare_request(req);
 937	if (!ret)
 938		return ret;
 939
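	/*
	 * First transfer: a single data descriptor (#8) initializes the
	 * MDHA unit. Subsequent transfers first reload the saved context
	 * via a load descriptor (#6) and chain it to a hash descriptor
	 * (#10).
	 */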
 940	if (rctx->first) {
 941		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
 942		if (ret)
 943			return ret;
 944
 945		dev->hw_desc[0]->next = 0;
 946		rctx->first = 0;
 947	} else {
 948		memcpy(dev->context_base, rctx->context, rctx->context_size);
 949
 950		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
 951		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
 952		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
 953		if (ret)
 954			return ret;
 955
 956		dev->hw_desc[1]->next = 0;
 957	}
 958
 959	sahara_dump_descriptors(dev);
 960	sahara_dump_links(dev);
 961
 962	reinit_completion(&dev->dma_completion);
 963
 964	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
 965
 966	timeout = wait_for_completion_timeout(&dev->dma_completion,
 967				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
 968
 969	if (rctx->sg_in_idx)
 970		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 971			     DMA_TO_DEVICE);
 972
 973	if (!timeout) {
 974		dev_err(dev->device, "SHA timeout\n");
 975		return -ETIMEDOUT;
 976	}
 977
 978	memcpy(rctx->context, dev->context_base, rctx->context_size);
 979
 980	if (req->result && rctx->last)
 981		memcpy(req->result, rctx->context, rctx->digest_size);
 982
 983	return 0;
 984}
 985
 986static int sahara_do_one_request(struct crypto_engine *engine, void *areq)
 987{
 988	struct crypto_async_request *async_req = areq;
 989	int err;
 990
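	/*
	 * crypto_finalize_*_request() invokes the request's completion
	 * callback, which callers commonly expect to run with softirqs
	 * disabled; hence the local_bh_disable()/local_bh_enable() pairs.
	 */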
 991	if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_AHASH) {
 992		struct ahash_request *req = ahash_request_cast(async_req);
 993
 994		err = sahara_sha_process(req);
 995		local_bh_disable();
 996		crypto_finalize_hash_request(engine, req, err);
 997		local_bh_enable();
 998	} else {
 999		struct skcipher_request *req = skcipher_request_cast(async_req);
1000
 1001		err = sahara_aes_process(req);
1002		local_bh_disable();
1003		crypto_finalize_skcipher_request(engine, req, err);
1004		local_bh_enable();
1005	}
1006
1007	return 0;
1008}
1009
1010static int sahara_sha_enqueue(struct ahash_request *req, int last)
1011{
1012	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1013	struct sahara_dev *dev = dev_ptr;
1014
1015	if (!req->nbytes && !last)
1016		return 0;
1017
1018	rctx->last = last;
1019
1020	return crypto_transfer_hash_request_to_engine(dev->engine, req);
1021}
1022
1023static int sahara_sha_init(struct ahash_request *req)
1024{
1025	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1026	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1027
1028	memset(rctx, 0, sizeof(*rctx));
1029
1030	switch (crypto_ahash_digestsize(tfm)) {
1031	case SHA1_DIGEST_SIZE:
1032		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1033		rctx->digest_size = SHA1_DIGEST_SIZE;
1034		break;
1035	case SHA256_DIGEST_SIZE:
1036		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1037		rctx->digest_size = SHA256_DIGEST_SIZE;
1038		break;
1039	default:
1040		return -EINVAL;
1041	}
1042
1043	rctx->context_size = rctx->digest_size + 4;
1044	rctx->first = 1;
1045
1046	return 0;
1047}
1048
1049static int sahara_sha_update(struct ahash_request *req)
1050{
1051	return sahara_sha_enqueue(req, 0);
1052}
1053
1054static int sahara_sha_final(struct ahash_request *req)
1055{
1056	req->nbytes = 0;
1057	return sahara_sha_enqueue(req, 1);
1058}
1059
1060static int sahara_sha_finup(struct ahash_request *req)
1061{
1062	return sahara_sha_enqueue(req, 1);
1063}
1064
1065static int sahara_sha_digest(struct ahash_request *req)
1066{
1067	sahara_sha_init(req);
1068
1069	return sahara_sha_finup(req);
1070}
1071
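/*
 * All hash state (partial buffers plus the hw context) lives in the request
 * context, so export/import can simply copy the whole structure; this is
 * also why halg.statesize below is sizeof(struct sahara_sha_reqctx).
 */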
1072static int sahara_sha_export(struct ahash_request *req, void *out)
1073{
1074	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1075
1076	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1077
1078	return 0;
1079}
1080
1081static int sahara_sha_import(struct ahash_request *req, const void *in)
1082{
1083	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1084
1085	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1086
1087	return 0;
1088}
1089
1090static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1091{
1092	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1093				 sizeof(struct sahara_sha_reqctx));
1094
1095	return 0;
1096}
1097
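/*
 * Minimal sketch (not part of the driver) of how a kernel consumer could
 * exercise the "cbc(aes)" implementation registered below; key, src_sg,
 * dst_sg, len and iv are placeholders:
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */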
1098static struct skcipher_engine_alg aes_algs[] = {
1099{
1100	.base = {
1101		.base.cra_name		= "ecb(aes)",
1102		.base.cra_driver_name	= "sahara-ecb-aes",
1103		.base.cra_priority	= 300,
1104		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1105		.base.cra_blocksize	= AES_BLOCK_SIZE,
1106		.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1107		.base.cra_alignmask	= 0x0,
1108		.base.cra_module	= THIS_MODULE,
1109
1110		.init			= sahara_aes_init_tfm,
1111		.exit			= sahara_aes_exit_tfm,
1112		.min_keysize		= AES_MIN_KEY_SIZE,
1113		.max_keysize		= AES_MAX_KEY_SIZE,
1114		.setkey			= sahara_aes_setkey,
1115		.encrypt		= sahara_aes_ecb_encrypt,
1116		.decrypt		= sahara_aes_ecb_decrypt,
1117	},
1118	.op = {
1119		.do_one_request = sahara_do_one_request,
1120	},
1121}, {
1122	.base = {
1123		.base.cra_name		= "cbc(aes)",
1124		.base.cra_driver_name	= "sahara-cbc-aes",
1125		.base.cra_priority	= 300,
1126		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1127		.base.cra_blocksize	= AES_BLOCK_SIZE,
1128		.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1129		.base.cra_alignmask	= 0x0,
1130		.base.cra_module	= THIS_MODULE,
1131
1132		.init			= sahara_aes_init_tfm,
1133		.exit			= sahara_aes_exit_tfm,
1134		.min_keysize		= AES_MIN_KEY_SIZE,
1135		.max_keysize		= AES_MAX_KEY_SIZE,
1136		.ivsize			= AES_BLOCK_SIZE,
1137		.setkey			= sahara_aes_setkey,
1138		.encrypt		= sahara_aes_cbc_encrypt,
1139		.decrypt		= sahara_aes_cbc_decrypt,
1140	},
1141	.op = {
1142		.do_one_request = sahara_do_one_request,
1143	},
1144}
1145};
1146
1147static struct ahash_engine_alg sha_v3_algs[] = {
1148{
1149	.base = {
1150		.init		= sahara_sha_init,
1151		.update		= sahara_sha_update,
1152		.final		= sahara_sha_final,
1153		.finup		= sahara_sha_finup,
1154		.digest		= sahara_sha_digest,
1155		.export		= sahara_sha_export,
1156		.import		= sahara_sha_import,
1157		.halg.digestsize	= SHA1_DIGEST_SIZE,
1158		.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1159		.halg.base	= {
1160			.cra_name		= "sha1",
1161			.cra_driver_name	= "sahara-sha1",
1162			.cra_priority		= 300,
1163			.cra_flags		= CRYPTO_ALG_ASYNC |
1164							CRYPTO_ALG_NEED_FALLBACK,
1165			.cra_blocksize		= SHA1_BLOCK_SIZE,
1166			.cra_ctxsize		= sizeof(struct sahara_ctx),
1167			.cra_alignmask		= 0,
1168			.cra_module		= THIS_MODULE,
1169			.cra_init		= sahara_sha_cra_init,
1170		}
1171	},
1172	.op = {
1173		.do_one_request = sahara_do_one_request,
1174	},
1175},
1176};
1177
1178static struct ahash_engine_alg sha_v4_algs[] = {
1179{
1180	.base = {
1181		.init		= sahara_sha_init,
1182		.update		= sahara_sha_update,
1183		.final		= sahara_sha_final,
1184		.finup		= sahara_sha_finup,
1185		.digest		= sahara_sha_digest,
1186		.export		= sahara_sha_export,
1187		.import		= sahara_sha_import,
1188		.halg.digestsize	= SHA256_DIGEST_SIZE,
1189		.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1190		.halg.base	= {
1191			.cra_name		= "sha256",
1192			.cra_driver_name	= "sahara-sha256",
1193			.cra_priority		= 300,
1194			.cra_flags		= CRYPTO_ALG_ASYNC |
1195							CRYPTO_ALG_NEED_FALLBACK,
1196			.cra_blocksize		= SHA256_BLOCK_SIZE,
1197			.cra_ctxsize		= sizeof(struct sahara_ctx),
1198			.cra_alignmask		= 0,
1199			.cra_module		= THIS_MODULE,
1200			.cra_init		= sahara_sha_cra_init,
1201		}
1202	},
1203	.op = {
1204		.do_one_request = sahara_do_one_request,
1205	},
1206},
1207};
1208
1209static irqreturn_t sahara_irq_handler(int irq, void *data)
1210{
1211	struct sahara_dev *dev = data;
1212	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1213	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1214
1215	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1216		     SAHARA_REG_CMD);
1217
1218	sahara_decode_status(dev, stat);
1219
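	/*
	 * A BUSY state means the engine is still working on the descriptor
	 * chain, so this interrupt did not signal our completion; report
	 * it as not handled.
	 */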
1220	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY)
1221		return IRQ_NONE;
1222
1223	if (SAHARA_STATUS_GET_STATE(stat) != SAHARA_STATE_COMPLETE)
1224		sahara_decode_error(dev, err);
1225
1226	complete(&dev->dma_completion);
1227
1228	return IRQ_HANDLED;
1229}
1230
1231
1232static int sahara_register_algs(struct sahara_dev *dev)
1233{
1234	int err;
1235
1236	err = crypto_engine_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
1237	if (err)
1238		return err;
1239
1240	err = crypto_engine_register_ahashes(sha_v3_algs,
1241					     ARRAY_SIZE(sha_v3_algs));
1242	if (err)
1243		goto err_aes_algs;
1244
1245	if (dev->version > SAHARA_VERSION_3) {
1246		err = crypto_engine_register_ahashes(sha_v4_algs,
1247						     ARRAY_SIZE(sha_v4_algs));
1248		if (err)
1249			goto err_sha_v3_algs;
1250	}
1251
1252	return 0;
1253
1254err_sha_v3_algs:
1255	crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
1256
1257err_aes_algs:
1258	crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
1259
1260	return err;
1261}
1262
1263static void sahara_unregister_algs(struct sahara_dev *dev)
1264{
1265	crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
1266	crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
1267
1268	if (dev->version > SAHARA_VERSION_3)
1269		crypto_engine_unregister_ahashes(sha_v4_algs,
1270						 ARRAY_SIZE(sha_v4_algs));
1271}
1272
1273static const struct of_device_id sahara_dt_ids[] = {
1274	{ .compatible = "fsl,imx53-sahara" },
1275	{ .compatible = "fsl,imx27-sahara" },
1276	{ /* sentinel */ }
1277};
1278MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1279
1280static int sahara_probe(struct platform_device *pdev)
1281{
1282	struct sahara_dev *dev;
1283	u32 version;
1284	int irq;
1285	int err;
1286	int i;
1287
1288	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1289	if (!dev)
1290		return -ENOMEM;
1291
1292	dev->device = &pdev->dev;
1293	platform_set_drvdata(pdev, dev);
1294
1295	/* Get the base address */
1296	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1297	if (IS_ERR(dev->regs_base))
1298		return PTR_ERR(dev->regs_base);
1299
1300	/* Get the IRQ */
 1301	irq = platform_get_irq(pdev, 0);
1302	if (irq < 0)
1303		return irq;
1304
1305	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1306			       0, dev_name(&pdev->dev), dev);
1307	if (err)
1308		return dev_err_probe(&pdev->dev, err,
1309				     "failed to request irq\n");
1310
1311	/* clocks */
1312	dev->clk_ipg = devm_clk_get_enabled(&pdev->dev, "ipg");
1313	if (IS_ERR(dev->clk_ipg))
1314		return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ipg),
1315				     "Could not get ipg clock\n");
1316
1317	dev->clk_ahb = devm_clk_get_enabled(&pdev->dev, "ahb");
1318	if (IS_ERR(dev->clk_ahb))
1319		return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ahb),
1320				     "Could not get ahb clock\n");
1321
1322	/* Allocate HW descriptors */
1323	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1324			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1325			&dev->hw_phys_desc[0], GFP_KERNEL);
1326	if (!dev->hw_desc[0])
1327		return -ENOMEM;
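	/* Both descriptors come from one coherent allocation, so the
	 * second one simply starts right after the first.
	 */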
1328	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1329	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1330				sizeof(struct sahara_hw_desc);
1331
1332	/* Allocate space for iv and key */
1333	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1334				&dev->key_phys_base, GFP_KERNEL);
1335	if (!dev->key_base)
1336		return -ENOMEM;
1337	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1338	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1339
1340	/* Allocate space for context: largest digest + message length field */
1341	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1342					SHA256_DIGEST_SIZE + 4,
1343					&dev->context_phys_base, GFP_KERNEL);
1344	if (!dev->context_base)
1345		return -ENOMEM;
1346
1347	/* Allocate space for HW links */
1348	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1349			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1350			&dev->hw_phys_link[0], GFP_KERNEL);
1351	if (!dev->hw_link[0])
1352		return -ENOMEM;
1353	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1354		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1355					sizeof(struct sahara_hw_link);
1356		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1357	}
1358
1359	dev_ptr = dev;
1360
1361	dev->engine = crypto_engine_alloc_init(&pdev->dev, true);
1362	if (!dev->engine)
1363		return -ENOMEM;
1364
1365	err = crypto_engine_start(dev->engine);
1366	if (err) {
1367		crypto_engine_exit(dev->engine);
1368		return dev_err_probe(&pdev->dev, err,
1369				     "Could not start crypto engine\n");
1370	}
1371
1372	init_completion(&dev->dma_completion);
1373
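	/*
	 * Read and sanity-check the hardware version: i.MX27 reports
	 * SAHARA version 3 in the low byte, while on i.MX53 (SAHARA
	 * version 4) the version sits in bits 15:8 of the register.
	 */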
1374	version = sahara_read(dev, SAHARA_REG_VERSION);
1375	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1376		if (version != SAHARA_VERSION_3)
1377			err = -ENODEV;
1378	} else if (of_device_is_compatible(pdev->dev.of_node,
1379			"fsl,imx53-sahara")) {
1380		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1381			err = -ENODEV;
1382		version = (version >> 8) & 0xff;
1383	}
1384	if (err == -ENODEV) {
1385		dev_err_probe(&pdev->dev, err,
1386			      "SAHARA version %d not supported\n", version);
1387		goto err_algs;
1388	}
1389
1390	dev->version = version;
1391
1392	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1393		     SAHARA_REG_CMD);
1394	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1395			SAHARA_CONTROL_SET_MAXBURST(8) |
1396			SAHARA_CONTROL_RNG_AUTORSD |
1397			SAHARA_CONTROL_ENABLE_INT,
1398			SAHARA_REG_CONTROL);
1399
1400	err = sahara_register_algs(dev);
1401	if (err)
1402		goto err_algs;
1403
1404	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1405
1406	return 0;
1407
1408err_algs:
1409	crypto_engine_exit(dev->engine);
1410
1411	return err;
1412}
1413
1414static void sahara_remove(struct platform_device *pdev)
1415{
1416	struct sahara_dev *dev = platform_get_drvdata(pdev);
1417
1418	crypto_engine_exit(dev->engine);
1419	sahara_unregister_algs(dev);
1420}
1421
1422static struct platform_driver sahara_driver = {
1423	.probe		= sahara_probe,
1424	.remove_new	= sahara_remove,
1425	.driver		= {
1426		.name	= SAHARA_NAME,
1427		.of_match_table = sahara_dt_ids,
1428	},
1429};
1430
1431module_platform_driver(sahara_driver);
1432
1433MODULE_LICENSE("GPL");
1434MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1435MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1436MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
v4.6
 
   1/*
   2 * Cryptographic API.
   3 *
   4 * Support for SAHARA cryptographic accelerator.
   5 *
   6 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
   7 * Copyright (c) 2013 Vista Silicon S.L.
   8 * Author: Javier Martin <javier.martin@vista-silicon.com>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as published
  12 * by the Free Software Foundation.
  13 *
  14 * Based on omap-aes.c and tegra-aes.c
  15 */
  16
  17#include <crypto/algapi.h>
  18#include <crypto/aes.h>
  19#include <crypto/hash.h>
  20#include <crypto/internal/hash.h>
 
  21#include <crypto/scatterwalk.h>
  22#include <crypto/sha.h>
 
 
  23
  24#include <linux/clk.h>
  25#include <linux/crypto.h>
  26#include <linux/interrupt.h>
  27#include <linux/io.h>
  28#include <linux/irq.h>
  29#include <linux/kernel.h>
  30#include <linux/kthread.h>
  31#include <linux/module.h>
  32#include <linux/mutex.h>
  33#include <linux/of.h>
  34#include <linux/of_device.h>
  35#include <linux/platform_device.h>
 
  36
  37#define SHA_BUFFER_LEN		PAGE_SIZE
  38#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
  39
  40#define SAHARA_NAME "sahara"
  41#define SAHARA_VERSION_3	3
  42#define SAHARA_VERSION_4	4
  43#define SAHARA_TIMEOUT_MS	1000
  44#define SAHARA_MAX_HW_DESC	2
  45#define SAHARA_MAX_HW_LINK	20
  46
  47#define FLAGS_MODE_MASK		0x000f
  48#define FLAGS_ENCRYPT		BIT(0)
  49#define FLAGS_CBC		BIT(1)
  50#define FLAGS_NEW_KEY		BIT(3)
  51
  52#define SAHARA_HDR_BASE			0x00800000
  53#define SAHARA_HDR_SKHA_ALG_AES	0
  54#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
  55#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
  56#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
  57#define SAHARA_HDR_FORM_DATA		(5 << 16)
  58#define SAHARA_HDR_FORM_KEY		(8 << 16)
  59#define SAHARA_HDR_LLO			(1 << 24)
  60#define SAHARA_HDR_CHA_SKHA		(1 << 28)
  61#define SAHARA_HDR_CHA_MDHA		(2 << 28)
  62#define SAHARA_HDR_PARITY_BIT		(1 << 31)
  63
  64#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
  65#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
  66#define SAHARA_HDR_MDHA_HASH		0xA0850000
  67#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
  68#define SAHARA_HDR_MDHA_ALG_SHA1	0
  69#define SAHARA_HDR_MDHA_ALG_MD5		1
  70#define SAHARA_HDR_MDHA_ALG_SHA256	2
  71#define SAHARA_HDR_MDHA_ALG_SHA224	3
  72#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
  73#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
  74#define SAHARA_HDR_MDHA_INIT		(1 << 5)
  75#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
  76#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
  77#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
  78#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
  79#define SAHARA_HDR_MDHA_SSL		(1 << 10)
  80
  81/* SAHARA can only process one request at a time */
  82#define SAHARA_QUEUE_LENGTH	1
  83
  84#define SAHARA_REG_VERSION	0x00
  85#define SAHARA_REG_DAR		0x04
  86#define SAHARA_REG_CONTROL	0x08
  87#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
  88#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
  89#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
  90#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
  91#define SAHARA_REG_CMD		0x0C
  92#define		SAHARA_CMD_RESET		(1 << 0)
  93#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
  94#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
  95#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
  96#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
  97#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
  98#define	SAHARA_REG_STATUS	0x10
  99#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
 100#define			SAHARA_STATE_IDLE	0
 101#define			SAHARA_STATE_BUSY	1
 102#define			SAHARA_STATE_ERR	2
 103#define			SAHARA_STATE_FAULT	3
 104#define			SAHARA_STATE_COMPLETE	4
 105#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
 106#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
 107#define		SAHARA_STATUS_ERROR		(1 << 4)
 108#define		SAHARA_STATUS_SECURE		(1 << 5)
 109#define		SAHARA_STATUS_FAIL		(1 << 6)
 110#define		SAHARA_STATUS_INIT		(1 << 7)
 111#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
 112#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
 113#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
 114#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
 115#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
 116#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
 117#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
 118#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
 119#define SAHARA_REG_ERRSTATUS	0x14
 120#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
 121#define			SAHARA_ERRSOURCE_CHA	14
 122#define			SAHARA_ERRSOURCE_DMA	15
 123#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
 124#define		SAHARA_ERRSTATUS_GET_DMASZ(x)(((x) >> 9) & 0x3)
 125#define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
 126#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
 127#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
 128#define SAHARA_REG_FADDR	0x18
 129#define SAHARA_REG_CDAR		0x1C
 130#define SAHARA_REG_IDAR		0x20
 131
 132struct sahara_hw_desc {
 133	u32	hdr;
 134	u32	len1;
 135	u32	p1;
 136	u32	len2;
 137	u32	p2;
 138	u32	next;
 139};
 140
 141struct sahara_hw_link {
 142	u32	len;
 143	u32	p;
 144	u32	next;
 145};
 146
 147struct sahara_ctx {
 148	unsigned long flags;
 149
 150	/* AES-specific context */
 151	int keylen;
 152	u8 key[AES_KEYSIZE_128];
 153	struct crypto_ablkcipher *fallback;
 154
 155	/* SHA-specific context */
 156	struct crypto_shash *shash_fallback;
 157};
 158
 159struct sahara_aes_reqctx {
 160	unsigned long mode;
 
 
 161};
 162
 163/*
 164 * struct sahara_sha_reqctx - private data per request
 165 * @buf: holds data for requests smaller than block_size
 166 * @rembuf: used to prepare one block_size-aligned request
 167 * @context: hw-specific context for request. Digest is extracted from this
 168 * @mode: specifies what type of hw-descriptor needs to be built
 169 * @digest_size: length of digest for this request
 170 * @context_size: length of hw-context for this request.
 171 *                Always digest_size + 4
 172 * @buf_cnt: number of bytes saved in buf
 173 * @sg_in_idx: number of hw links
 174 * @in_sg: scatterlist for input data
 175 * @in_sg_chain: scatterlists for chained input data
 176 * @total: total number of bytes for transfer
 177 * @last: is this the last block
 178 * @first: is this the first block
 179 * @active: inside a transfer
 180 */
 181struct sahara_sha_reqctx {
 182	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
 183	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
 184	u8			context[SHA256_DIGEST_SIZE + 4];
 185	unsigned int		mode;
 186	unsigned int		digest_size;
 187	unsigned int		context_size;
 188	unsigned int		buf_cnt;
 189	unsigned int		sg_in_idx;
 190	struct scatterlist	*in_sg;
 191	struct scatterlist	in_sg_chain[2];
 192	size_t			total;
 193	unsigned int		last;
 194	unsigned int		first;
 195	unsigned int		active;
 196};
 197
 198struct sahara_dev {
 199	struct device		*device;
 200	unsigned int		version;
 201	void __iomem		*regs_base;
 202	struct clk		*clk_ipg;
 203	struct clk		*clk_ahb;
 204	struct mutex		queue_mutex;
 205	struct task_struct	*kthread;
 206	struct completion	dma_completion;
 207
 208	struct sahara_ctx	*ctx;
 209	spinlock_t		lock;
 210	struct crypto_queue	queue;
 211	unsigned long		flags;
 212
 213	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
 214	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
 215
 216	u8			*key_base;
 217	dma_addr_t		key_phys_base;
 218
 219	u8			*iv_base;
 220	dma_addr_t		iv_phys_base;
 221
 222	u8			*context_base;
 223	dma_addr_t		context_phys_base;
 224
 225	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
 226	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
 227
 228	size_t			total;
 229	struct scatterlist	*in_sg;
 230	int		nb_in_sg;
 231	struct scatterlist	*out_sg;
 232	int		nb_out_sg;
 233
 234	u32			error;
 235};
 236
 237static struct sahara_dev *dev_ptr;
 238
 239static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
 240{
 241	writel(data, dev->regs_base + reg);
 242}
 243
 244static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
 245{
 246	return readl(dev->regs_base + reg);
 247}
 248
 249static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
 250{
 251	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
 252			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
 253			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 254
 255	if (dev->flags & FLAGS_CBC) {
 256		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
 257		hdr ^= SAHARA_HDR_PARITY_BIT;
 258	}
 259
 260	if (dev->flags & FLAGS_ENCRYPT) {
 261		hdr |= SAHARA_HDR_SKHA_OP_ENC;
 262		hdr ^= SAHARA_HDR_PARITY_BIT;
 263	}
 264
 265	return hdr;
 266}
 267
 268static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
 269{
 270	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
 271			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 272}
 273
 274static const char *sahara_err_src[16] = {
 275	"No error",
 276	"Header error",
 277	"Descriptor length error",
 278	"Descriptor length or pointer error",
 279	"Link length error",
 280	"Link pointer error",
 281	"Input buffer error",
 282	"Output buffer error",
 283	"Output buffer starvation",
 284	"Internal state fault",
 285	"General descriptor problem",
 286	"Reserved",
 287	"Descriptor address error",
 288	"Link address error",
 289	"CHA error",
 290	"DMA error"
 291};
 292
 293static const char *sahara_err_dmasize[4] = {
 294	"Byte transfer",
 295	"Half-word transfer",
 296	"Word transfer",
 297	"Reserved"
 298};
 299
 300static const char *sahara_err_dmasrc[8] = {
 301	"No error",
 302	"AHB bus error",
 303	"Internal IP bus error",
 304	"Parity error",
 305	"DMA crosses 256 byte boundary",
 306	"DMA is busy",
 307	"Reserved",
 308	"DMA HW error"
 309};
 310
 311static const char *sahara_cha_errsrc[12] = {
 312	"Input buffer non-empty",
 313	"Illegal address",
 314	"Illegal mode",
 315	"Illegal data size",
 316	"Illegal key size",
 317	"Write during processing",
 318	"CTX read during processing",
 319	"HW error",
 320	"Input buffer disabled/underflow",
 321	"Output buffer disabled/overflow",
 322	"DES key parity error",
 323	"Reserved"
 324};
 325
 326static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
 327
 328static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
 329{
 330	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
 331	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
 332
 333	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
 334
 335	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
 336
 337	if (source == SAHARA_ERRSOURCE_DMA) {
 338		if (error & SAHARA_ERRSTATUS_DMA_DIR)
 339			dev_err(dev->device, "		* DMA read.\n");
 340		else
 341			dev_err(dev->device, "		* DMA write.\n");
 342
 343		dev_err(dev->device, "		* %s.\n",
 344		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
 345		dev_err(dev->device, "		* %s.\n",
 346		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
 347	} else if (source == SAHARA_ERRSOURCE_CHA) {
 348		dev_err(dev->device, "		* %s.\n",
 349			sahara_cha_errsrc[chasrc]);
 350		dev_err(dev->device, "		* %s.\n",
 351		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
 352	}
 353	dev_err(dev->device, "\n");
 354}
 355
 356static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
 357
 358static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
 359{
 360	u8 state;
 361
 362	if (!IS_ENABLED(DEBUG))
 363		return;
 364
 365	state = SAHARA_STATUS_GET_STATE(status);
 366
 367	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
 368		__func__, status);
 369
 370	dev_dbg(dev->device, "	- State = %d:\n", state);
 371	if (state & SAHARA_STATE_COMP_FLAG)
 372		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
 373
 374	dev_dbg(dev->device, "		* %s.\n",
 375	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
 376
 377	if (status & SAHARA_STATUS_DAR_FULL)
 378		dev_dbg(dev->device, "	- DAR Full.\n");
 379	if (status & SAHARA_STATUS_ERROR)
 380		dev_dbg(dev->device, "	- Error.\n");
 381	if (status & SAHARA_STATUS_SECURE)
 382		dev_dbg(dev->device, "	- Secure.\n");
 383	if (status & SAHARA_STATUS_FAIL)
 384		dev_dbg(dev->device, "	- Fail.\n");
 385	if (status & SAHARA_STATUS_RNG_RESEED)
 386		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
 387	if (status & SAHARA_STATUS_ACTIVE_RNG)
 388		dev_dbg(dev->device, "	- RNG Active.\n");
 389	if (status & SAHARA_STATUS_ACTIVE_MDHA)
 390		dev_dbg(dev->device, "	- MDHA Active.\n");
 391	if (status & SAHARA_STATUS_ACTIVE_SKHA)
 392		dev_dbg(dev->device, "	- SKHA Active.\n");
 393
 394	if (status & SAHARA_STATUS_MODE_BATCH)
 395		dev_dbg(dev->device, "	- Batch Mode.\n");
 396	else if (status & SAHARA_STATUS_MODE_DEDICATED)
  397		dev_dbg(dev->device, "	- Dedicated Mode.\n");
 398	else if (status & SAHARA_STATUS_MODE_DEBUG)
 399		dev_dbg(dev->device, "	- Debug Mode.\n");
 400
 401	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
 402	       SAHARA_STATUS_GET_ISTATE(status));
 403
 404	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
 405		sahara_read(dev, SAHARA_REG_CDAR));
 406	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
 407		sahara_read(dev, SAHARA_REG_IDAR));
 408}
 409
 410static void sahara_dump_descriptors(struct sahara_dev *dev)
 411{
 412	int i;
 413
 414	if (!IS_ENABLED(DEBUG))
 415		return;
 416
 417	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
 418		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
 419			i, &dev->hw_phys_desc[i]);
 420		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
 421		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
 422		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
 423		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
 424		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
 425		dev_dbg(dev->device, "\tnext = 0x%08x\n",
 426			dev->hw_desc[i]->next);
 427	}
 428	dev_dbg(dev->device, "\n");
 429}
 430
 431static void sahara_dump_links(struct sahara_dev *dev)
 432{
 433	int i;
 434
 435	if (!IS_ENABLED(DEBUG))
 436		return;
 437
 438	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
 439		dev_dbg(dev->device, "Link (%d) (%pad):\n",
 440			i, &dev->hw_phys_link[i]);
 441		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
 442		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
 443		dev_dbg(dev->device, "\tnext = 0x%08x\n",
 444			dev->hw_link[i]->next);
 445	}
 446	dev_dbg(dev->device, "\n");
 447}
 448
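/*
 * Build the AES descriptor chain. If a new key is pending, descriptor 0
 * loads the key (and the IV in CBC mode) into SKHA and chains to the
 * data descriptor, whose p1/p2 point at link lists describing the input
 * and output scatterlists. Writing the first descriptor's physical
 * address to DAR starts the operation.
 */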
 449static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 450{
 451	struct sahara_ctx *ctx = dev->ctx;
 452	struct scatterlist *sg;
 453	int ret;
 454	int i, j;
 455	int idx = 0;
 456
 457	/* Copy new key if necessary */
 458	if (ctx->flags & FLAGS_NEW_KEY) {
 459		memcpy(dev->key_base, ctx->key, ctx->keylen);
 460		ctx->flags &= ~FLAGS_NEW_KEY;
 461
 462		if (dev->flags & FLAGS_CBC) {
 463			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
 464			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
 465		} else {
 466			dev->hw_desc[idx]->len1 = 0;
 467			dev->hw_desc[idx]->p1 = 0;
 468		}
 469		dev->hw_desc[idx]->len2 = ctx->keylen;
 470		dev->hw_desc[idx]->p2 = dev->key_phys_base;
 471		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
 472
 473		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
 474
 475		idx++;
 476	}
 477
 478	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
 479	if (dev->nb_in_sg < 0) {
  480		dev_err(dev->device, "Invalid number of src SG entries.\n");
 481		return dev->nb_in_sg;
 482	}
 483	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
 484	if (dev->nb_out_sg < 0) {
  485		dev_err(dev->device, "Invalid number of dst SG entries.\n");
 486		return dev->nb_out_sg;
 487	}
 488	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
 489		dev_err(dev->device, "not enough hw links (%d)\n",
 490			dev->nb_in_sg + dev->nb_out_sg);
 491		return -EINVAL;
 492	}
 493
 494	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 495			 DMA_TO_DEVICE);
 496	if (ret != dev->nb_in_sg) {
 497		dev_err(dev->device, "couldn't map in sg\n");
  498		return -EINVAL;
 499	}
 500	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 501			 DMA_FROM_DEVICE);
 502	if (ret != dev->nb_out_sg) {
 503		dev_err(dev->device, "couldn't map out sg\n");
  504		goto unmap_in;
 505	}
 506
 507	/* Create input links */
 508	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
 509	sg = dev->in_sg;
 510	for (i = 0; i < dev->nb_in_sg; i++) {
 511		dev->hw_link[i]->len = sg->length;
 512		dev->hw_link[i]->p = sg->dma_address;
 513		if (i == (dev->nb_in_sg - 1)) {
 514			dev->hw_link[i]->next = 0;
 515		} else {
 516			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 517			sg = sg_next(sg);
 518		}
 519	}
 520
 521	/* Create output links */
 522	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
 523	sg = dev->out_sg;
 524	for (j = i; j < dev->nb_out_sg + i; j++) {
 525		dev->hw_link[j]->len = sg->length;
 526		dev->hw_link[j]->p = sg->dma_address;
 527		if (j == (dev->nb_out_sg + i - 1)) {
 528			dev->hw_link[j]->next = 0;
 529		} else {
 530			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
 531			sg = sg_next(sg);
 532		}
 533	}
 534
  535	/* Fill remaining fields of the data descriptor */
 536	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
 537	dev->hw_desc[idx]->len1 = dev->total;
 538	dev->hw_desc[idx]->len2 = dev->total;
 539	dev->hw_desc[idx]->next = 0;
 540
 541	sahara_dump_descriptors(dev);
 542	sahara_dump_links(dev);
 543
 544	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
 545
 546	return 0;
 547
  548	unmap_in:
  549		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
  550			DMA_TO_DEVICE);
 554
 555	return -EINVAL;
 556}
 557
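/*
 * Process one AES request synchronously: set up the descriptors, wait
 * for the interrupt handler to signal dma_completion (bounded by
 * SAHARA_TIMEOUT_MS), then unmap the scatterlists.
 */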
 558static int sahara_aes_process(struct ablkcipher_request *req)
 559{
 560	struct sahara_dev *dev = dev_ptr;
 561	struct sahara_ctx *ctx;
 562	struct sahara_aes_reqctx *rctx;
 563	int ret;
 564	unsigned long timeout;
 565
 566	/* Request is ready to be dispatched by the device */
 567	dev_dbg(dev->device,
 568		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
 569		req->nbytes, req->src, req->dst);
 570
 571	/* assign new request to device */
 572	dev->total = req->nbytes;
 573	dev->in_sg = req->src;
 574	dev->out_sg = req->dst;
 575
 576	rctx = ablkcipher_request_ctx(req);
 577	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
 578	rctx->mode &= FLAGS_MODE_MASK;
 579	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 580
 581	if ((dev->flags & FLAGS_CBC) && req->info)
  582		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);
 583
 584	/* assign new context to device */
 585	dev->ctx = ctx;
 586
 587	reinit_completion(&dev->dma_completion);
 588
 589	ret = sahara_hw_descriptor_create(dev);
  590	if (ret)
  591		return ret;
 592
 593	timeout = wait_for_completion_timeout(&dev->dma_completion,
 594				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
 595	if (!timeout) {
 596		dev_err(dev->device, "AES timeout\n");
 597		return -ETIMEDOUT;
 598	}
 599
  600	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
  601		DMA_FROM_DEVICE);
  602	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
  603		DMA_TO_DEVICE);
 604
 605	return 0;
 606}
 607
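/*
 * Only 128-bit keys are programmed into the hardware; 192- and 256-bit
 * keys are deferred to the software fallback transform at request time.
 */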
 608static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 609			     unsigned int keylen)
 610{
 611	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 612	int ret;
 613
 614	ctx->keylen = keylen;
 615
  616	/* SAHARA only supports 128-bit keys */
 617	if (keylen == AES_KEYSIZE_128) {
 618		memcpy(ctx->key, key, keylen);
 619		ctx->flags |= FLAGS_NEW_KEY;
 620		return 0;
 621	}
 622
 623	if (keylen != AES_KEYSIZE_128 &&
 624	    keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
 625		return -EINVAL;
 626
 627	/*
 628	 * The requested key size is not supported by HW, do a fallback.
 629	 */
 630	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
 631	ctx->fallback->base.crt_flags |=
 632		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 633
 634	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
 635	if (ret) {
 636		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
 637
 638		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
 639		tfm_aux->crt_flags |=
 640			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
 641	}
 642	return ret;
 643}
 644
 645static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 646{
 647	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 648	struct sahara_dev *dev = dev_ptr;
 649	int err = 0;
 650
 651	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
 652		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
 653
 654	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
 655		dev_err(dev->device,
  656			"request size is not a whole number of AES blocks\n");
 657		return -EINVAL;
 658	}
 659
 660	rctx->mode = mode;
 661
 662	mutex_lock(&dev->queue_mutex);
 663	err = ablkcipher_enqueue_request(&dev->queue, req);
 664	mutex_unlock(&dev->queue_mutex);
 665
 666	wake_up_process(dev->kthread);
 667
 668	return err;
 669}
 670
 671static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
 672{
 673	struct crypto_tfm *tfm =
 674		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 675	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 676		crypto_ablkcipher_reqtfm(req));
 677	int err;
 678
 679	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 680		ablkcipher_request_set_tfm(req, ctx->fallback);
 681		err = crypto_ablkcipher_encrypt(req);
 682		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 683		return err;
 684	}
 685
 686	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
 687}
 688
 689static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
 690{
 691	struct crypto_tfm *tfm =
 692		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 693	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 694		crypto_ablkcipher_reqtfm(req));
 695	int err;
 696
 697	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 698		ablkcipher_request_set_tfm(req, ctx->fallback);
 699		err = crypto_ablkcipher_decrypt(req);
 700		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 701		return err;
 702	}
 703
 704	return sahara_aes_crypt(req, 0);
 705}
 706
 707static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
 708{
 709	struct crypto_tfm *tfm =
 710		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 711	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 712		crypto_ablkcipher_reqtfm(req));
 713	int err;
 714
 715	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 716		ablkcipher_request_set_tfm(req, ctx->fallback);
 717		err = crypto_ablkcipher_encrypt(req);
 718		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 719		return err;
 720	}
 721
 722	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
 723}
 724
 725static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
 726{
 727	struct crypto_tfm *tfm =
 728		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 729	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 730		crypto_ablkcipher_reqtfm(req));
 731	int err;
 732
 733	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 734		ablkcipher_request_set_tfm(req, ctx->fallback);
 735		err = crypto_ablkcipher_decrypt(req);
 736		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 737		return err;
 738	}
 739
 740	return sahara_aes_crypt(req, FLAGS_CBC);
 741}
 742
 743static int sahara_aes_cra_init(struct crypto_tfm *tfm)
 744{
 745	const char *name = crypto_tfm_alg_name(tfm);
 746	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
 747
 748	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
 749				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
 750	if (IS_ERR(ctx->fallback)) {
 751		pr_err("Error allocating fallback algo %s\n", name);
 752		return PTR_ERR(ctx->fallback);
 753	}
 754
 755	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
 756
 757	return 0;
 758}
 759
 760static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
 761{
 762	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
 763
 764	if (ctx->fallback)
 765		crypto_free_ablkcipher(ctx->fallback);
 766	ctx->fallback = NULL;
 767}
 768
 769static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
 770			      struct sahara_sha_reqctx *rctx)
 771{
  772	u32 hdr = rctx->mode;
  773
 776	if (rctx->first) {
 777		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
 778		hdr |= SAHARA_HDR_MDHA_INIT;
 779	} else {
 780		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
 781	}
 782
 783	if (rctx->last)
 784		hdr |= SAHARA_HDR_MDHA_PDATA;
 785
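	/*
	 * The descriptor header carries a parity bit (bit 31); force the
	 * header word to odd parity before handing it to the hardware.
	 */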
 786	if (hweight_long(hdr) % 2 == 0)
 787		hdr |= SAHARA_HDR_PARITY_BIT;
 788
 789	return hdr;
 790}
 791
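/*
 * Populate hardware links for the input scatterlist, starting at link
 * index 'start'. Returns the index of the next free link (so the caller
 * can chain the context link) or a negative errno.
 */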
 792static int sahara_sha_hw_links_create(struct sahara_dev *dev,
 793				       struct sahara_sha_reqctx *rctx,
 794				       int start)
 795{
 796	struct scatterlist *sg;
 797	unsigned int i;
 798	int ret;
 799
 800	dev->in_sg = rctx->in_sg;
 801
 802	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
 803	if (dev->nb_in_sg < 0) {
  804		dev_err(dev->device, "Invalid number of src SG entries.\n");
 805		return dev->nb_in_sg;
 806	}
  807	if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
  808		dev_err(dev->device, "not enough hw links (%d)\n",
  809			dev->nb_in_sg);
 810		return -EINVAL;
 811	}
 812
 813	sg = dev->in_sg;
 814	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
 815	if (!ret)
 816		return -EFAULT;
 817
 818	for (i = start; i < dev->nb_in_sg + start; i++) {
 819		dev->hw_link[i]->len = sg->length;
 820		dev->hw_link[i]->p = sg->dma_address;
 821		if (i == (dev->nb_in_sg + start - 1)) {
 822			dev->hw_link[i]->next = 0;
 823		} else {
 824			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 825			sg = sg_next(sg);
 826		}
 827	}
 828
 829	return i;
 830}
 831
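/*
 * Build the hash data descriptor: len1/p1 describe the input link list
 * (zero when there is no data) and len2/p2 point at a single link used
 * to store the MDHA context after the operation completes.
 */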
 832static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
 833						struct sahara_sha_reqctx *rctx,
 834						struct ahash_request *req,
 835						int index)
 836{
  837	unsigned int result_len;
 838	int i = index;
 839
 840	if (rctx->first)
  841		/* Create initial descriptor: #8 */
 842		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
 843	else
 844		/* Create hash descriptor: #10. Must follow #6. */
 845		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
 846
 847	dev->hw_desc[index]->len1 = rctx->total;
 848	if (dev->hw_desc[index]->len1 == 0) {
 849		/* if len1 is 0, p1 must be 0, too */
 850		dev->hw_desc[index]->p1 = 0;
 851		rctx->sg_in_idx = 0;
 852	} else {
 853		/* Create input links */
 854		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
 855		i = sahara_sha_hw_links_create(dev, rctx, index);
 856
 857		rctx->sg_in_idx = index;
 858		if (i < 0)
 859			return i;
 860	}
 861
 862	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
 863
 864	/* Save the context for the next operation */
 865	result_len = rctx->context_size;
 866	dev->hw_link[i]->p = dev->context_phys_base;
 867
 868	dev->hw_link[i]->len = result_len;
 869	dev->hw_desc[index]->len2 = result_len;
 870
 871	dev->hw_link[i]->next = 0;
 872
 873	return 0;
 874}
 875
 876/*
 877 * Load descriptor aka #6
 878 *
 879 * To load a previously saved context back to the MDHA unit
 880 *
 881 * p1: Saved Context
 882 * p2: NULL
 883 *
 884 */
 885static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
 886						struct sahara_sha_reqctx *rctx,
 887						struct ahash_request *req,
 888						int index)
 889{
 890	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
 891
 892	dev->hw_desc[index]->len1 = rctx->context_size;
 893	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
 894	dev->hw_desc[index]->len2 = 0;
 895	dev->hw_desc[index]->p2 = 0;
 896
 897	dev->hw_link[index]->len = rctx->context_size;
 898	dev->hw_link[index]->p = dev->context_phys_base;
 899	dev->hw_link[index]->next = 0;
 900
 901	return 0;
 902}
 903
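/*
 * Trim the scatterlist so it covers exactly nbytes: the entry holding
 * the final byte is shortened and marked as the end of the list.
 */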
 904static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
 905{
 906	if (!sg || !sg->length)
 907		return nbytes;
 908
 909	while (nbytes && sg) {
 910		if (nbytes <= sg->length) {
 911			sg->length = nbytes;
 912			sg_mark_end(sg);
 913			break;
 914		}
 915		nbytes -= sg->length;
 916		sg = sg_next(sg);
 917	}
 918
 919	return nbytes;
 920}
 921
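/*
 * Stage data for hashing. A short, non-final update is only buffered;
 * otherwise previously buffered bytes are prepended via a chained
 * scatterlist and any tail that is not block-aligned is saved for the
 * next call. Returns 0 when everything was buffered, -EINPROGRESS when
 * there is data for the hardware.
 */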
 922static int sahara_sha_prepare_request(struct ahash_request *req)
 923{
 924	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 925	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
 926	unsigned int hash_later;
 927	unsigned int block_size;
 928	unsigned int len;
 929
 930	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 931
 932	/* append bytes from previous operation */
 933	len = rctx->buf_cnt + req->nbytes;
 934
 935	/* only the last transfer can be padded in hardware */
 936	if (!rctx->last && (len < block_size)) {
  937		/* too little data; save it for the next operation */
 938		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
 939					 0, req->nbytes, 0);
 940		rctx->buf_cnt += req->nbytes;
 941
 942		return 0;
 943	}
 944
 945	/* add data from previous operation first */
 946	if (rctx->buf_cnt)
 947		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
 948
 949	/* data must always be a multiple of block_size */
 950	hash_later = rctx->last ? 0 : len & (block_size - 1);
 951	if (hash_later) {
 952		unsigned int offset = req->nbytes - hash_later;
 953		/* Save remaining bytes for later use */
 954		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
 955					hash_later, 0);
 956	}
 957
  958	/* nbytes should now be a multiple of the block size */
 959	req->nbytes = req->nbytes - hash_later;
 960
 961	sahara_walk_and_recalc(req->src, req->nbytes);
 962
 963	/* have data from previous operation and current */
 964	if (rctx->buf_cnt && req->nbytes) {
 965		sg_init_table(rctx->in_sg_chain, 2);
 966		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
 967
 968		sg_chain(rctx->in_sg_chain, 2, req->src);
 969
 970		rctx->total = req->nbytes + rctx->buf_cnt;
 971		rctx->in_sg = rctx->in_sg_chain;
 972
 973		req->src = rctx->in_sg_chain;
 974	/* only data from previous operation */
 975	} else if (rctx->buf_cnt) {
 976		if (req->src)
 977			rctx->in_sg = req->src;
 978		else
 979			rctx->in_sg = rctx->in_sg_chain;
 980		/* buf was copied into rembuf above */
 981		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
 982		rctx->total = rctx->buf_cnt;
 983	/* no data from previous operation */
 984	} else {
 985		rctx->in_sg = req->src;
 986		rctx->total = req->nbytes;
 987		req->src = rctx->in_sg;
 988	}
 989
 990	/* on next call, we only have the remaining data in the buffer */
 991	rctx->buf_cnt = hash_later;
 992
 993	return -EINPROGRESS;
 994}
 995
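/*
 * Run one hash pass. The first pass uses a single data descriptor;
 * later passes chain a context-restore descriptor (#6) in front of the
 * hash descriptor. The updated context is copied back afterwards so the
 * next update can restore it.
 */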
 996static int sahara_sha_process(struct ahash_request *req)
 997{
 998	struct sahara_dev *dev = dev_ptr;
 999	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1000	int ret;
1001	unsigned long timeout;
1002
1003	ret = sahara_sha_prepare_request(req);
1004	if (!ret)
1005		return ret;
1006
1007	if (rctx->first) {
 1008		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		if (ret)
			return ret;
 1009		dev->hw_desc[0]->next = 0;
1010		rctx->first = 0;
1011	} else {
1012		memcpy(dev->context_base, rctx->context, rctx->context_size);
1013
1014		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1015		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
 1016		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		if (ret)
			return ret;
 1017		dev->hw_desc[1]->next = 0;
1018	}
1019
1020	sahara_dump_descriptors(dev);
1021	sahara_dump_links(dev);
1022
1023	reinit_completion(&dev->dma_completion);
1024
1025	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1026
1027	timeout = wait_for_completion_timeout(&dev->dma_completion,
1028				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1029	if (!timeout) {
1030		dev_err(dev->device, "SHA timeout\n");
1031		return -ETIMEDOUT;
1032	}
1033
1034	if (rctx->sg_in_idx)
1035		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1036			     DMA_TO_DEVICE);
1037
1038	memcpy(rctx->context, dev->context_base, rctx->context_size);
1039
1040	if (req->result)
1041		memcpy(req->result, rctx->context, rctx->digest_size);
1042
1043	return 0;
1044}
1045
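/*
 * Dispatcher thread: dequeues requests under queue_mutex, notifies any
 * backlogged request, runs AES or SHA processing synchronously, and
 * completes the request with the result. Sleeps when the queue is empty.
 */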
1046static int sahara_queue_manage(void *data)
1047{
1048	struct sahara_dev *dev = (struct sahara_dev *)data;
1049	struct crypto_async_request *async_req;
1050	struct crypto_async_request *backlog;
1051	int ret = 0;
1052
1053	do {
1054		__set_current_state(TASK_INTERRUPTIBLE);
1055
1056		mutex_lock(&dev->queue_mutex);
1057		backlog = crypto_get_backlog(&dev->queue);
1058		async_req = crypto_dequeue_request(&dev->queue);
1059		mutex_unlock(&dev->queue_mutex);
1060
1061		if (backlog)
1062			backlog->complete(backlog, -EINPROGRESS);
1063
1064		if (async_req) {
1065			if (crypto_tfm_alg_type(async_req->tfm) ==
1066			    CRYPTO_ALG_TYPE_AHASH) {
1067				struct ahash_request *req =
1068					ahash_request_cast(async_req);
1069
1070				ret = sahara_sha_process(req);
1071			} else {
1072				struct ablkcipher_request *req =
1073					ablkcipher_request_cast(async_req);
1074
1075				ret = sahara_aes_process(req);
1076			}
1077
1078			async_req->complete(async_req, ret);
1079
1080			continue;
1081		}
1082
1083		schedule();
1084	} while (!kthread_should_stop());
1085
1086	return 0;
1087}
1088
1089static int sahara_sha_enqueue(struct ahash_request *req, int last)
1090{
1091	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1092	struct sahara_dev *dev = dev_ptr;
1093	int ret;
1094
1095	if (!req->nbytes && !last)
1096		return 0;
1097
1098	rctx->last = last;
1099
1100	if (!rctx->active) {
1101		rctx->active = 1;
1102		rctx->first = 1;
1103	}
1104
1105	mutex_lock(&dev->queue_mutex);
1106	ret = crypto_enqueue_request(&dev->queue, &req->base);
1107	mutex_unlock(&dev->queue_mutex);
1108
1109	wake_up_process(dev->kthread);
1110
1111	return ret;
1112}
1113
1114static int sahara_sha_init(struct ahash_request *req)
1115{
1116	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1117	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1118
1119	memset(rctx, 0, sizeof(*rctx));
1120
1121	switch (crypto_ahash_digestsize(tfm)) {
1122	case SHA1_DIGEST_SIZE:
1123		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1124		rctx->digest_size = SHA1_DIGEST_SIZE;
1125		break;
1126	case SHA256_DIGEST_SIZE:
1127		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1128		rctx->digest_size = SHA256_DIGEST_SIZE;
1129		break;
1130	default:
1131		return -EINVAL;
1132	}
1133
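	/*
	 * Context written back by MDHA: the digest plus a 32-bit
	 * message-length word (see the allocation in sahara_probe()).
	 */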
1134	rctx->context_size = rctx->digest_size + 4;
1135	rctx->active = 0;
1136
1137	return 0;
1138}
1139
1140static int sahara_sha_update(struct ahash_request *req)
1141{
1142	return sahara_sha_enqueue(req, 0);
1143}
1144
1145static int sahara_sha_final(struct ahash_request *req)
1146{
1147	req->nbytes = 0;
1148	return sahara_sha_enqueue(req, 1);
1149}
1150
1151static int sahara_sha_finup(struct ahash_request *req)
1152{
1153	return sahara_sha_enqueue(req, 1);
1154}
1155
1156static int sahara_sha_digest(struct ahash_request *req)
1157{
1158	sahara_sha_init(req);
1159
1160	return sahara_sha_finup(req);
1161}
1162
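/*
 * The exported state is a snapshot of the whole request context;
 * halg.statesize is sizeof(struct sahara_sha_reqctx) to match.
 */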
1163static int sahara_sha_export(struct ahash_request *req, void *out)
1164{
1165	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1166
1167	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1168
1169	return 0;
1170}
1171
1172static int sahara_sha_import(struct ahash_request *req, const void *in)
1173{
1174	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1175
1176	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1177
1178	return 0;
1179}
1180
1181static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1182{
1183	const char *name = crypto_tfm_alg_name(tfm);
1184	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
1185
1186	ctx->shash_fallback = crypto_alloc_shash(name, 0,
1187					CRYPTO_ALG_NEED_FALLBACK);
1188	if (IS_ERR(ctx->shash_fallback)) {
1189		pr_err("Error allocating fallback algo %s\n", name);
1190		return PTR_ERR(ctx->shash_fallback);
1191	}
1192	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1193				 sizeof(struct sahara_sha_reqctx) +
1194				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1195
1196	return 0;
1197}
1198
1199static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
1200{
1201	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
1202
1203	crypto_free_shash(ctx->shash_fallback);
1204	ctx->shash_fallback = NULL;
1205}
1206
1207static struct crypto_alg aes_algs[] = {
1208{
1209	.cra_name		= "ecb(aes)",
1210	.cra_driver_name	= "sahara-ecb-aes",
1211	.cra_priority		= 300,
1212	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
1213			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1214	.cra_blocksize		= AES_BLOCK_SIZE,
1215	.cra_ctxsize		= sizeof(struct sahara_ctx),
1216	.cra_alignmask		= 0x0,
1217	.cra_type		= &crypto_ablkcipher_type,
1218	.cra_module		= THIS_MODULE,
1219	.cra_init		= sahara_aes_cra_init,
1220	.cra_exit		= sahara_aes_cra_exit,
1221	.cra_u.ablkcipher = {
 1222		.min_keysize	= AES_MIN_KEY_SIZE,
1223		.max_keysize	= AES_MAX_KEY_SIZE,
1224		.setkey		= sahara_aes_setkey,
1225		.encrypt	= sahara_aes_ecb_encrypt,
1226		.decrypt	= sahara_aes_ecb_decrypt,
1227	}
1228}, {
1229	.cra_name		= "cbc(aes)",
1230	.cra_driver_name	= "sahara-cbc-aes",
1231	.cra_priority		= 300,
1232	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
1233			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1234	.cra_blocksize		= AES_BLOCK_SIZE,
1235	.cra_ctxsize		= sizeof(struct sahara_ctx),
1236	.cra_alignmask		= 0x0,
1237	.cra_type		= &crypto_ablkcipher_type,
1238	.cra_module		= THIS_MODULE,
1239	.cra_init		= sahara_aes_cra_init,
1240	.cra_exit		= sahara_aes_cra_exit,
1241	.cra_u.ablkcipher = {
 1242		.min_keysize	= AES_MIN_KEY_SIZE,
1243		.max_keysize	= AES_MAX_KEY_SIZE,
1244		.ivsize		= AES_BLOCK_SIZE,
1245		.setkey		= sahara_aes_setkey,
1246		.encrypt	= sahara_aes_cbc_encrypt,
1247		.decrypt	= sahara_aes_cbc_decrypt,
1248	}
1249}
1250};
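
/*
 * Usage sketch (illustrative; not part of this driver): a kernel
 * consumer reaches these algorithms through the generic crypto API of
 * this kernel generation, e.g.
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *
 * and the crypto core selects "sahara-cbc-aes" when its priority (300)
 * is the highest registered implementation of "cbc(aes)".
 */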
1251
1252static struct ahash_alg sha_v3_algs[] = {
1253{
1254	.init		= sahara_sha_init,
1255	.update		= sahara_sha_update,
1256	.final		= sahara_sha_final,
1257	.finup		= sahara_sha_finup,
1258	.digest		= sahara_sha_digest,
1259	.export		= sahara_sha_export,
1260	.import		= sahara_sha_import,
1261	.halg.digestsize	= SHA1_DIGEST_SIZE,
1262	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1263	.halg.base	= {
1264		.cra_name		= "sha1",
1265		.cra_driver_name	= "sahara-sha1",
1266		.cra_priority		= 300,
1267		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1268						CRYPTO_ALG_ASYNC |
1269						CRYPTO_ALG_NEED_FALLBACK,
1270		.cra_blocksize		= SHA1_BLOCK_SIZE,
1271		.cra_ctxsize		= sizeof(struct sahara_ctx),
1272		.cra_alignmask		= 0,
1273		.cra_module		= THIS_MODULE,
1274		.cra_init		= sahara_sha_cra_init,
1275		.cra_exit		= sahara_sha_cra_exit,
1276	}
1277},
1278};
1279
1280static struct ahash_alg sha_v4_algs[] = {
1281{
1282	.init		= sahara_sha_init,
1283	.update		= sahara_sha_update,
1284	.final		= sahara_sha_final,
1285	.finup		= sahara_sha_finup,
1286	.digest		= sahara_sha_digest,
1287	.export		= sahara_sha_export,
1288	.import		= sahara_sha_import,
1289	.halg.digestsize	= SHA256_DIGEST_SIZE,
1290	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1291	.halg.base	= {
1292		.cra_name		= "sha256",
1293		.cra_driver_name	= "sahara-sha256",
1294		.cra_priority		= 300,
1295		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
1296						CRYPTO_ALG_ASYNC |
1297						CRYPTO_ALG_NEED_FALLBACK,
1298		.cra_blocksize		= SHA256_BLOCK_SIZE,
1299		.cra_ctxsize		= sizeof(struct sahara_ctx),
1300		.cra_alignmask		= 0,
1301		.cra_module		= THIS_MODULE,
1302		.cra_init		= sahara_sha_cra_init,
1303		.cra_exit		= sahara_sha_cra_exit,
1304	}
1305},
1306};
1307
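/*
 * Completion interrupt: acknowledge interrupt and error flags, record
 * whether the descriptor chain completed cleanly, and wake the thread
 * waiting on dma_completion. A BUSY state is not ours to handle.
 */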
1308static irqreturn_t sahara_irq_handler(int irq, void *data)
1309{
1310	struct sahara_dev *dev = (struct sahara_dev *)data;
1311	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1312	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1313
1314	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1315		     SAHARA_REG_CMD);
1316
1317	sahara_decode_status(dev, stat);
1318
1319	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1320		return IRQ_NONE;
1321	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1322		dev->error = 0;
1323	} else {
1324		sahara_decode_error(dev, err);
1325		dev->error = -EINVAL;
1326	}
1327
1328	complete(&dev->dma_completion);
1329
1330	return IRQ_HANDLED;
1331}
1332
1334static int sahara_register_algs(struct sahara_dev *dev)
1335{
1336	int err;
1337	unsigned int i, j, k, l;
1338
1339	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1340		INIT_LIST_HEAD(&aes_algs[i].cra_list);
1341		err = crypto_register_alg(&aes_algs[i]);
1342		if (err)
1343			goto err_aes_algs;
1344	}
1345
1346	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1347		err = crypto_register_ahash(&sha_v3_algs[k]);
1348		if (err)
1349			goto err_sha_v3_algs;
1350	}
1351
1352	if (dev->version > SAHARA_VERSION_3)
1353		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1354			err = crypto_register_ahash(&sha_v4_algs[l]);
1355			if (err)
1356				goto err_sha_v4_algs;
1357		}
1358
1359	return 0;
1360
1361err_sha_v4_algs:
1362	for (j = 0; j < l; j++)
1363		crypto_unregister_ahash(&sha_v4_algs[j]);
1364
1365err_sha_v3_algs:
1366	for (j = 0; j < k; j++)
 1367		crypto_unregister_ahash(&sha_v3_algs[j]);
1368
1369err_aes_algs:
1370	for (j = 0; j < i; j++)
1371		crypto_unregister_alg(&aes_algs[j]);
1372
1373	return err;
1374}
1375
1376static void sahara_unregister_algs(struct sahara_dev *dev)
1377{
1378	unsigned int i;
1379
1380	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1381		crypto_unregister_alg(&aes_algs[i]);
1382
 1383	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1384		crypto_unregister_ahash(&sha_v3_algs[i]);
1385
1386	if (dev->version > SAHARA_VERSION_3)
1387		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1388			crypto_unregister_ahash(&sha_v4_algs[i]);
1389}
1390
1391static struct platform_device_id sahara_platform_ids[] = {
1392	{ .name = "sahara-imx27" },
1393	{ /* sentinel */ }
1394};
1395MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
1396
1397static struct of_device_id sahara_dt_ids[] = {
1398	{ .compatible = "fsl,imx53-sahara" },
1399	{ .compatible = "fsl,imx27-sahara" },
1400	{ /* sentinel */ }
1401};
1402MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1403
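/*
 * Probe: map the register window, request the IRQ, acquire the ipg/ahb
 * clocks, allocate coherent buffers for descriptors, key/IV, MDHA
 * context and links, start the dispatcher thread, check the hardware
 * version, and finally register the algorithms.
 */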
1404static int sahara_probe(struct platform_device *pdev)
1405{
1406	struct sahara_dev *dev;
1407	struct resource *res;
1408	u32 version;
1409	int irq;
1410	int err;
1411	int i;
1412
1413	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
1414	if (dev == NULL) {
1415		dev_err(&pdev->dev, "unable to alloc data struct.\n");
1416		return -ENOMEM;
1417	}
1418
1419	dev->device = &pdev->dev;
1420	platform_set_drvdata(pdev, dev);
1421
1422	/* Get the base address */
1423	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1424	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
1425	if (IS_ERR(dev->regs_base))
1426		return PTR_ERR(dev->regs_base);
1427
1428	/* Get the IRQ */
 1429	irq = platform_get_irq(pdev, 0);
1430	if (irq < 0) {
1431		dev_err(&pdev->dev, "failed to get irq resource\n");
1432		return irq;
1433	}
1434
1435	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1436			       0, dev_name(&pdev->dev), dev);
1437	if (err) {
1438		dev_err(&pdev->dev, "failed to request irq\n");
1439		return err;
1440	}
1441
1442	/* clocks */
1443	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1444	if (IS_ERR(dev->clk_ipg)) {
1445		dev_err(&pdev->dev, "Could not get ipg clock\n");
1446		return PTR_ERR(dev->clk_ipg);
1447	}
1448
1449	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1450	if (IS_ERR(dev->clk_ahb)) {
1451		dev_err(&pdev->dev, "Could not get ahb clock\n");
1452		return PTR_ERR(dev->clk_ahb);
1453	}
1454
1455	/* Allocate HW descriptors */
1456	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1457			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1458			&dev->hw_phys_desc[0], GFP_KERNEL);
1459	if (!dev->hw_desc[0]) {
1460		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1461		return -ENOMEM;
1462	}
1463	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1464	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1465				sizeof(struct sahara_hw_desc);
1466
1467	/* Allocate space for iv and key */
1468	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1469				&dev->key_phys_base, GFP_KERNEL);
1470	if (!dev->key_base) {
1471		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1472		return -ENOMEM;
1473	}
1474	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1475	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1476
1477	/* Allocate space for context: largest digest + message length field */
1478	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1479					SHA256_DIGEST_SIZE + 4,
1480					&dev->context_phys_base, GFP_KERNEL);
1481	if (!dev->context_base) {
1482		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1483		return -ENOMEM;
1484	}
1485
1486	/* Allocate space for HW links */
1487	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1488			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1489			&dev->hw_phys_link[0], GFP_KERNEL);
1490	if (!dev->hw_link[0]) {
1491		dev_err(&pdev->dev, "Could not allocate hw links\n");
1492		return -ENOMEM;
1493	}
1494	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1495		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1496					sizeof(struct sahara_hw_link);
1497		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1498	}
1499
1500	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1501
1502	spin_lock_init(&dev->lock);
1503	mutex_init(&dev->queue_mutex);
1504
1505	dev_ptr = dev;
1506
1507	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
 1508	if (IS_ERR(dev->kthread))
 1509		return PTR_ERR(dev->kthread);
1511
1512	init_completion(&dev->dma_completion);
1513
1514	err = clk_prepare_enable(dev->clk_ipg);
1515	if (err)
1516		return err;
1517	err = clk_prepare_enable(dev->clk_ahb);
1518	if (err)
1519		goto clk_ipg_disable;
1520
1521	version = sahara_read(dev, SAHARA_REG_VERSION);
1522	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1523		if (version != SAHARA_VERSION_3)
1524			err = -ENODEV;
1525	} else if (of_device_is_compatible(pdev->dev.of_node,
1526			"fsl,imx53-sahara")) {
1527		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1528			err = -ENODEV;
1529		version = (version >> 8) & 0xff;
1530	}
1531	if (err == -ENODEV) {
1532		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1533				version);
1534		goto err_algs;
1535	}
1536
1537	dev->version = version;
1538
1539	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1540		     SAHARA_REG_CMD);
1541	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1542			SAHARA_CONTROL_SET_MAXBURST(8) |
1543			SAHARA_CONTROL_RNG_AUTORSD |
1544			SAHARA_CONTROL_ENABLE_INT,
1545			SAHARA_REG_CONTROL);
1546
1547	err = sahara_register_algs(dev);
1548	if (err)
1549		goto err_algs;
1550
1551	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1552
1553	return 0;
1554
1555err_algs:
1556	kthread_stop(dev->kthread);
1557	dev_ptr = NULL;
1558	clk_disable_unprepare(dev->clk_ahb);
1559clk_ipg_disable:
1560	clk_disable_unprepare(dev->clk_ipg);
1561
1562	return err;
1563}
1564
1565static int sahara_remove(struct platform_device *pdev)
1566{
1567	struct sahara_dev *dev = platform_get_drvdata(pdev);
1568
1569	kthread_stop(dev->kthread);
1570
1571	sahara_unregister_algs(dev);
1572
1573	clk_disable_unprepare(dev->clk_ipg);
1574	clk_disable_unprepare(dev->clk_ahb);
1575
1576	dev_ptr = NULL;
1577
1578	return 0;
1579}
1580
1581static struct platform_driver sahara_driver = {
1582	.probe		= sahara_probe,
1583	.remove		= sahara_remove,
1584	.driver		= {
1585		.name	= SAHARA_NAME,
1586		.of_match_table = sahara_dt_ids,
1587	},
1588	.id_table = sahara_platform_ids,
1589};
1590
1591module_platform_driver(sahara_driver);
1592
1593MODULE_LICENSE("GPL");
1594MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1595MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1596MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");