Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for SAHARA cryptographic accelerator.
   6 *
   7 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
   8 * Copyright (c) 2013 Vista Silicon S.L.
   9 * Author: Javier Martin <javier.martin@vista-silicon.com>
  10 *
 
 
 
 
  11 * Based on omap-aes.c and tegra-aes.c
  12 */
  13
  14#include <crypto/aes.h>
  15#include <crypto/internal/hash.h>
  16#include <crypto/internal/skcipher.h>
  17#include <crypto/scatterwalk.h>
  18#include <crypto/engine.h>
  19#include <crypto/sha1.h>
  20#include <crypto/sha2.h>
  21
  22#include <linux/clk.h>
  23#include <linux/dma-mapping.h>
  24#include <linux/interrupt.h>
  25#include <linux/io.h>
  26#include <linux/irq.h>
  27#include <linux/kernel.h>
 
  28#include <linux/module.h>
 
  29#include <linux/of.h>
 
  30#include <linux/platform_device.h>
  31#include <linux/spinlock.h>
  32
/* Driver-wide size limits */
#define SHA_BUFFER_LEN				PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE		SHA256_BLOCK_SIZE

#define SAHARA_NAME				"sahara"
#define SAHARA_VERSION_3			3
#define SAHARA_VERSION_4			4
#define SAHARA_TIMEOUT_MS			1000
#define SAHARA_MAX_HW_DESC			2
#define SAHARA_MAX_HW_LINK			20

/* Software mode flags kept in dev->flags and sahara_aes_reqctx.mode */
#define FLAGS_MODE_MASK				0x000f
#define FLAGS_ENCRYPT				BIT(0)
#define FLAGS_CBC				BIT(1)

/* Hardware descriptor header bits for the SKHA (AES) unit */
#define SAHARA_HDR_BASE				0x00800000
#define SAHARA_HDR_SKHA_ALG_AES			0
#define SAHARA_HDR_SKHA_MODE_ECB		0
#define SAHARA_HDR_SKHA_OP_ENC			BIT(2)
#define SAHARA_HDR_SKHA_MODE_CBC		BIT(3)
#define SAHARA_HDR_FORM_DATA			(5 << 16)
#define SAHARA_HDR_FORM_KEY			BIT(19)
#define SAHARA_HDR_LLO				BIT(24)
#define SAHARA_HDR_CHA_SKHA			BIT(28)
#define SAHARA_HDR_CHA_MDHA			BIT(29)
/* headers are protected by an odd-parity bit, see sahara_sha_init_hdr() */
#define SAHARA_HDR_PARITY_BIT			BIT(31)

/* Hardware descriptor header values/bits for the MDHA (hash) unit */
#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY		0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH		0x208D0000
#define SAHARA_HDR_MDHA_HASH			0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST		0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1		0
#define SAHARA_HDR_MDHA_ALG_MD5			1
#define SAHARA_HDR_MDHA_ALG_SHA256		2
#define SAHARA_HDR_MDHA_ALG_SHA224		3
#define SAHARA_HDR_MDHA_PDATA			BIT(2)
#define SAHARA_HDR_MDHA_HMAC			BIT(3)
#define SAHARA_HDR_MDHA_INIT			BIT(5)
#define SAHARA_HDR_MDHA_IPAD			BIT(6)
#define SAHARA_HDR_MDHA_OPAD			BIT(7)
#define SAHARA_HDR_MDHA_SWAP			BIT(8)
#define SAHARA_HDR_MDHA_MAC_FULL		BIT(9)
#define SAHARA_HDR_MDHA_SSL			BIT(10)

/* MMIO register offsets and field accessors */
#define SAHARA_REG_VERSION			0x00
#define SAHARA_REG_DAR				0x04
#define SAHARA_REG_CONTROL			0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)		(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)		(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD		BIT(7)
#define SAHARA_CONTROL_ENABLE_INT		BIT(4)
#define SAHARA_REG_CMD				0x0C
#define SAHARA_CMD_RESET			BIT(0)
#define SAHARA_CMD_CLEAR_INT			BIT(8)
#define SAHARA_CMD_CLEAR_ERR			BIT(9)
#define SAHARA_CMD_SINGLE_STEP			BIT(10)
#define SAHARA_CMD_MODE_BATCH			BIT(16)
#define SAHARA_CMD_MODE_DEBUG			BIT(18)
#define SAHARA_REG_STATUS			0x10
#define SAHARA_STATUS_GET_STATE(x)		((x) & 0x7)
#define SAHARA_STATE_IDLE			0
#define SAHARA_STATE_BUSY			1
#define SAHARA_STATE_ERR			2
#define SAHARA_STATE_FAULT			3
#define SAHARA_STATE_COMPLETE			4
#define SAHARA_STATE_COMP_FLAG			BIT(2)
#define SAHARA_STATUS_DAR_FULL			BIT(3)
#define SAHARA_STATUS_ERROR			BIT(4)
#define SAHARA_STATUS_SECURE			BIT(5)
#define SAHARA_STATUS_FAIL			BIT(6)
#define SAHARA_STATUS_INIT			BIT(7)
#define SAHARA_STATUS_RNG_RESEED		BIT(8)
#define SAHARA_STATUS_ACTIVE_RNG		BIT(9)
#define SAHARA_STATUS_ACTIVE_MDHA		BIT(10)
#define SAHARA_STATUS_ACTIVE_SKHA		BIT(11)
#define SAHARA_STATUS_MODE_BATCH		BIT(16)
#define SAHARA_STATUS_MODE_DEDICATED		BIT(17)
#define SAHARA_STATUS_MODE_DEBUG		BIT(18)
#define SAHARA_STATUS_GET_ISTATE(x)		(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS			0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)		((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA			14
#define SAHARA_ERRSOURCE_DMA			15
#define SAHARA_ERRSTATUS_DMA_DIR		BIT(8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)		(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)		(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)		(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)		(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR			0x18
#define SAHARA_REG_CDAR				0x1C
#define SAHARA_REG_IDAR				0x20
 
 
 
 
 123
/*
 * Hardware descriptor consumed by the device: two length/pointer pairs
 * plus a link to the next descriptor. Pointer fields hold DMA addresses
 * (see dev->hw_phys_desc[] / dev->hw_phys_link[]).
 */
struct sahara_hw_desc {
	u32	hdr;	/* operation header, SAHARA_HDR_* bits */
	u32	len1;	/* length of the buffer behind p1 */
	u32	p1;	/* first pointer (DMA address), 0 if unused */
	u32	len2;	/* length of the buffer behind p2 */
	u32	p2;	/* second pointer (DMA address), 0 if unused */
	u32	next;	/* DMA address of next descriptor, 0 terminates */
};
 132
/*
 * Hardware link-list entry: one contiguous DMA segment of a transfer.
 * Chains of these back the p1/p2 pointers of a sahara_hw_desc.
 */
struct sahara_hw_link {
	u32	len;	/* number of bytes at p */
	u32	p;	/* DMA address of the segment */
	u32	next;	/* DMA address of next link, 0 terminates */
};
 138
/* Per-tfm context for the AES algorithms. */
struct sahara_ctx {
	/* AES-specific context */
	int keylen;				/* configured key length in bytes */
	u8 key[AES_KEYSIZE_128];		/* raw key; hardware path only */
	struct crypto_skcipher *fallback;	/* software tfm for 192/256-bit keys */
};
 145
/* Per-request context for the AES algorithms. */
struct sahara_aes_reqctx {
	unsigned long mode;			/* FLAGS_ENCRYPT/FLAGS_CBC for this request */
	u8 iv_out[AES_BLOCK_SIZE];		/* last ciphertext block saved for CBC decrypt */
	struct skcipher_request fallback_req;	// keep at the end
};
 151
/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: descriptor index holding the input links; 0 also means
 *             "no scatterlist mapped" (checked before dma_unmap_sg())
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	size_t			total;
	unsigned int		last;
	unsigned int		first;
};
 184
/* Per-device state; the driver uses a single instance (see dev_ptr). */
struct sahara_dev {
	struct device		*device;
	unsigned int		version;	/* SAHARA_VERSION_3 or _4 */
	void __iomem		*regs_base;	/* MMIO registers */
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;

	/* waited on by the AES/SHA process paths after writing DAR */
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;		/* AES context of the running request */
	unsigned long		flags;		/* FLAGS_* mode bits of the running request */

	/* the two hardware descriptors (virtual + DMA addresses) */
	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	/* DMA-visible staging buffer for the AES key */
	u8			*key_base;
	dma_addr_t		key_phys_base;

	/* DMA-visible staging buffer for the IV (CBC) */
	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	/* DMA-visible save/restore area for the MDHA hash context */
	u8			*context_base;
	dma_addr_t		context_phys_base;

	/* pool of link entries shared by input and output chains */
	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	/* bookkeeping for the transfer currently on the hardware */
	size_t			total;
	struct scatterlist	*in_sg;
	int		nb_in_sg;
	struct scatterlist	*out_sg;
	int		nb_out_sg;

	struct crypto_engine *engine;
};
 219
/* Singleton device handle used by the algorithm callbacks. */
static struct sahara_dev *dev_ptr;
 221
/* MMIO helper: write @data to register offset @reg. */
static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}
 226
/* MMIO helper: read the register at offset @reg. */
static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}
 231
 232static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
 233{
 234	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
 235			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
 236			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 237
 238	if (dev->flags & FLAGS_CBC) {
 239		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
 240		hdr ^= SAHARA_HDR_PARITY_BIT;
 241	}
 242
 243	if (dev->flags & FLAGS_ENCRYPT) {
 244		hdr |= SAHARA_HDR_SKHA_OP_ENC;
 245		hdr ^= SAHARA_HDR_PARITY_BIT;
 246	}
 247
 248	return hdr;
 249}
 250
 251static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
 252{
 253	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
 254			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 255}
 256
/*
 * Human-readable decodings of the error-status register, indexed by the
 * corresponding SAHARA_ERRSTATUS_GET_* field value.
 */
static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

/* DMA transfer size at the time of the error */
static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

/* DMA error cause */
static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

/* CHA error cause; CHASRC is a bit mask, see sahara_decode_error() */
static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

/* Which accelerator unit raised the CHA error */
static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
 310
 311static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
 312{
 313	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
 314	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
 315
 316	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
 317
 318	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
 319
 320	if (source == SAHARA_ERRSOURCE_DMA) {
 321		if (error & SAHARA_ERRSTATUS_DMA_DIR)
 322			dev_err(dev->device, "		* DMA read.\n");
 323		else
 324			dev_err(dev->device, "		* DMA write.\n");
 325
 326		dev_err(dev->device, "		* %s.\n",
 327		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
 328		dev_err(dev->device, "		* %s.\n",
 329		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
 330	} else if (source == SAHARA_ERRSOURCE_CHA) {
 331		dev_err(dev->device, "		* %s.\n",
 332			sahara_cha_errsrc[chasrc]);
 333		dev_err(dev->device, "		* %s.\n",
 334		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
 335	}
 336	dev_err(dev->device, "\n");
 337}
 338
/* Decoding of the low state bits (completion flag masked off). */
static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
 340
/*
 * Pretty-print the status register. Compiled out (early return) unless
 * DEBUG is defined, and all output goes through dev_dbg().
 */
static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!__is_defined(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, "	- State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");

	/* mask off the completion flag before indexing the name table */
	dev_dbg(dev->device, "		* %s.\n",
	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, "	- DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, "	- Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, "	- Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, "	- Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, "	- RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, "	- MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, "	- SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, "	- Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, "	- Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, "	- Debug Mode.\n");

	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
	       SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}
 392
 393static void sahara_dump_descriptors(struct sahara_dev *dev)
 394{
 395	int i;
 396
 397	if (!__is_defined(DEBUG))
 398		return;
 399
 400	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
 401		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
 402			i, &dev->hw_phys_desc[i]);
 403		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
 404		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
 405		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
 406		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
 407		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
 408		dev_dbg(dev->device, "\tnext = 0x%08x\n",
 409			dev->hw_desc[i]->next);
 410	}
 411	dev_dbg(dev->device, "\n");
 412}
 413
 414static void sahara_dump_links(struct sahara_dev *dev)
 415{
 416	int i;
 417
 418	if (!__is_defined(DEBUG))
 419		return;
 420
 421	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
 422		dev_dbg(dev->device, "Link (%d) (%pad):\n",
 423			i, &dev->hw_phys_link[i]);
 424		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
 425		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
 426		dev_dbg(dev->device, "\tnext = 0x%08x\n",
 427			dev->hw_link[i]->next);
 428	}
 429	dev_dbg(dev->device, "\n");
 430}
 431
/*
 * Build the two-descriptor chain for one AES request and start the
 * engine by writing the first descriptor address to DAR.
 *
 * Descriptor 0 loads the key (and, for CBC, the IV); descriptor 1
 * points at the input/output scatterlists through hw_link entries.
 *
 * Returns 0 on success or a negative errno. On success the src/dst
 * scatterlists remain DMA-mapped; sahara_aes_process() unmaps them
 * once the operation completes.
 */
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;
	u32 len;

	/* stage the key where the device can fetch it */
	memcpy(dev->key_base, ctx->key, ctx->keylen);

	if (dev->flags & FLAGS_CBC) {
		dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
		dev->hw_desc[idx]->p1 = dev->iv_phys_base;
	} else {
		dev->hw_desc[idx]->len1 = 0;
		dev->hw_desc[idx]->p1 = 0;
	}
	dev->hw_desc[idx]->len2 = ctx->keylen;
	dev->hw_desc[idx]->p2 = dev->key_phys_base;
	dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
	dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

	idx++;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid numbers of dst SG.\n");
		return dev->nb_out_sg;
	}
	/* input and output chains share the same fixed link pool */
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (!ret) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (!ret) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	len = dev->total;
	for (i = 0; i < dev->nb_in_sg; i++) {
		/* cap each link so no more than dev->total is transferred */
		dev->hw_link[i]->len = min(len, sg->length);
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			len -= min(len, sg->length);
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links (continue in the pool after the input links) */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	len = dev->total;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = min(len, sg->length);
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			len -= min(len, sg->length);
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	/* writing DAR starts processing of the descriptor chain */
	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}
 539
 540static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
 541{
 542	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 543	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 544	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 545
 546	/* Update IV buffer to contain the last ciphertext block */
 547	if (rctx->mode & FLAGS_ENCRYPT) {
 548		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
 549				   ivsize, req->cryptlen - ivsize);
 550	} else {
 551		memcpy(req->iv, rctx->iv_out, ivsize);
 552	}
 553}
 554
 555static int sahara_aes_process(struct skcipher_request *req)
 556{
 557	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 558	struct sahara_dev *dev = dev_ptr;
 559	struct sahara_ctx *ctx;
 560	struct sahara_aes_reqctx *rctx;
 561	int ret;
 562	unsigned long timeout;
 563
 564	/* Request is ready to be dispatched by the device */
 565	dev_dbg(dev->device,
 566		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
 567		req->cryptlen, req->src, req->dst);
 568
 569	/* assign new request to device */
 570	dev->total = req->cryptlen;
 571	dev->in_sg = req->src;
 572	dev->out_sg = req->dst;
 573
 574	rctx = skcipher_request_ctx(req);
 575	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 576	rctx->mode &= FLAGS_MODE_MASK;
 577	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 578
 579	if ((dev->flags & FLAGS_CBC) && req->iv) {
 580		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
 581
 582		memcpy(dev->iv_base, req->iv, ivsize);
 583
 584		if (!(dev->flags & FLAGS_ENCRYPT)) {
 585			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
 586					   rctx->iv_out, ivsize,
 587					   req->cryptlen - ivsize);
 588		}
 589	}
 590
 591	/* assign new context to device */
 592	dev->ctx = ctx;
 593
 594	reinit_completion(&dev->dma_completion);
 595
 596	ret = sahara_hw_descriptor_create(dev);
 597	if (ret)
 598		return -EINVAL;
 599
 600	timeout = wait_for_completion_timeout(&dev->dma_completion,
 601				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
 602
 603	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 604		DMA_FROM_DEVICE);
 605	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 606		DMA_TO_DEVICE);
 607
 608	if (!timeout) {
 609		dev_err(dev->device, "AES timeout\n");
 610		return -ETIMEDOUT;
 611	}
 612
 613	if ((dev->flags & FLAGS_CBC) && req->iv)
 614		sahara_aes_cbc_update_iv(req);
 
 
 615
 616	return 0;
 617}
 618
 619static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 620			     unsigned int keylen)
 621{
 622	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 623
 624	ctx->keylen = keylen;
 625
 626	/* SAHARA only supports 128bit keys */
 627	if (keylen == AES_KEYSIZE_128) {
 628		memcpy(ctx->key, key, keylen);
 
 629		return 0;
 630	}
 631
 632	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
 633		return -EINVAL;
 634
 635	/*
 636	 * The requested key size is not supported by HW, do a fallback.
 637	 */
 638	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
 639	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
 640						 CRYPTO_TFM_REQ_MASK);
 641	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
 642}
 643
 644static int sahara_aes_fallback(struct skcipher_request *req, unsigned long mode)
 645{
 646	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 647	struct sahara_ctx *ctx = crypto_skcipher_ctx(
 648		crypto_skcipher_reqtfm(req));
 649
 650	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 651	skcipher_request_set_callback(&rctx->fallback_req,
 652				      req->base.flags,
 653				      req->base.complete,
 654				      req->base.data);
 655	skcipher_request_set_crypt(&rctx->fallback_req, req->src,
 656				   req->dst, req->cryptlen, req->iv);
 657
 658	if (mode & FLAGS_ENCRYPT)
 659		return crypto_skcipher_encrypt(&rctx->fallback_req);
 660
 661	return crypto_skcipher_decrypt(&rctx->fallback_req);
 662}
 663
 664static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
 665{
 666	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 667	struct sahara_ctx *ctx = crypto_skcipher_ctx(
 668		crypto_skcipher_reqtfm(req));
 669	struct sahara_dev *dev = dev_ptr;
 670
 671	if (!req->cryptlen)
 672		return 0;
 673
 674	if (unlikely(ctx->keylen != AES_KEYSIZE_128))
 675		return sahara_aes_fallback(req, mode);
 676
 677	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
 678		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
 679
 680	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE))
 
 
 681		return -EINVAL;
 
 682
 683	rctx->mode = mode;
 684
 685	return crypto_transfer_skcipher_request_to_engine(dev->engine, req);
 
 
 
 
 
 
 686}
 687
/* skcipher .encrypt hook for ecb(aes). */
static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
{
	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}
 692
/* skcipher .decrypt hook for ecb(aes). */
static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
{
	return sahara_aes_crypt(req, 0);
}
 697
/* skcipher .encrypt hook for cbc(aes). */
static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
{
	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}
 702
/* skcipher .decrypt hook for cbc(aes). */
static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
{
	return sahara_aes_crypt(req, FLAGS_CBC);
}
 707
 708static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
 709{
 710	const char *name = crypto_tfm_alg_name(&tfm->base);
 711	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
 712
 713	ctx->fallback = crypto_alloc_skcipher(name, 0,
 
 714					      CRYPTO_ALG_NEED_FALLBACK);
 715	if (IS_ERR(ctx->fallback)) {
 716		pr_err("Error allocating fallback algo %s\n", name);
 717		return PTR_ERR(ctx->fallback);
 718	}
 719
 720	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
 721					 crypto_skcipher_reqsize(ctx->fallback));
 722
 723	return 0;
 724}
 725
/* skcipher .exit hook: free the fallback allocated in init_tfm. */
static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
{
	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}
 732
 733static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
 734			      struct sahara_sha_reqctx *rctx)
 735{
 736	u32 hdr = 0;
 737
 738	hdr = rctx->mode;
 739
 740	if (rctx->first) {
 741		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
 742		hdr |= SAHARA_HDR_MDHA_INIT;
 743	} else {
 744		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
 745	}
 746
 747	if (rctx->last)
 748		hdr |= SAHARA_HDR_MDHA_PDATA;
 749
 750	if (hweight_long(hdr) % 2 == 0)
 751		hdr |= SAHARA_HDR_PARITY_BIT;
 752
 753	return hdr;
 754}
 755
 756static int sahara_sha_hw_links_create(struct sahara_dev *dev,
 757				       struct sahara_sha_reqctx *rctx,
 758				       int start)
 759{
 760	struct scatterlist *sg;
 761	unsigned int len;
 762	unsigned int i;
 763	int ret;
 764
 765	dev->in_sg = rctx->in_sg;
 766
 767	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
 768	if (dev->nb_in_sg < 0) {
 769		dev_err(dev->device, "Invalid numbers of src SG.\n");
 770		return dev->nb_in_sg;
 771	}
 772	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
 773		dev_err(dev->device, "not enough hw links (%d)\n",
 774			dev->nb_in_sg + dev->nb_out_sg);
 775		return -EINVAL;
 776	}
 777
 778	sg = dev->in_sg;
 779	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
 780	if (!ret)
 781		return -EFAULT;
 782
 783	len = rctx->total;
 784	for (i = start; i < dev->nb_in_sg + start; i++) {
 785		dev->hw_link[i]->len = min(len, sg->length);
 786		dev->hw_link[i]->p = sg->dma_address;
 787		if (i == (dev->nb_in_sg + start - 1)) {
 788			dev->hw_link[i]->next = 0;
 789		} else {
 790			len -= min(len, sg->length);
 791			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 792			sg = sg_next(sg);
 793		}
 794	}
 795
 796	return i;
 797}
 798
/*
 * Build the data descriptor (#8 on the first pass, #10 afterwards):
 * p1 points at the input links (0 for an empty transfer), p2 at a
 * single link covering the MDHA context save area.
 *
 * @index: slot in dev->hw_desc[]/dev->hw_link[] to start from.
 * Returns 0 on success or a negative errno from link creation.
 */
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8*/
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	/* i is now the first free link slot after the input chain */
	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}
 842
 843/*
 844 * Load descriptor aka #6
 845 *
 846 * To load a previously saved context back to the MDHA unit
 847 *
 848 * p1: Saved Context
 849 * p2: NULL
 850 *
 851 */
 852static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
 853						struct sahara_sha_reqctx *rctx,
 854						struct ahash_request *req,
 855						int index)
 856{
 857	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
 858
 859	dev->hw_desc[index]->len1 = rctx->context_size;
 860	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
 861	dev->hw_desc[index]->len2 = 0;
 862	dev->hw_desc[index]->p2 = 0;
 863
 864	dev->hw_link[index]->len = rctx->context_size;
 865	dev->hw_link[index]->p = dev->context_phys_base;
 866	dev->hw_link[index]->next = 0;
 867
 868	return 0;
 869}
 870
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Gather the data for the next hardware pass.
 *
 * Combines bytes buffered from the previous call (rctx->buf) with
 * req->src and trims the result to a multiple of the block size; the
 * remainder is stashed in rctx->buf for the next call, since only the
 * very last transfer may be padded by the hardware.
 *
 * Returns 0 when everything fit in the buffer and there is nothing to
 * process yet, or -EINPROGRESS when rctx->in_sg/rctx->total describe
 * data that must be fed to the hardware now.
 */
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too few data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					hash_later, 0);
	}

	rctx->total = len - hash_later;
	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
		sg_chain(rctx->in_sg_chain, 2, req->src);
		rctx->in_sg = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		rctx->in_sg = rctx->in_sg_chain;
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}
 928
/*
 * Run one hash request on the hardware (called from the crypto engine).
 *
 * First request: a single data descriptor (#8). Follow-ups: a context
 * restore descriptor (#6) chained to a hash descriptor (#10). The MDHA
 * context is copied back to rctx->context after each pass so the next
 * request (possibly after export/import) can resume; the digest is
 * copied to req->result on the final transfer.
 */
static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	/* 0 means everything was buffered for later; nothing to do yet */
	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		if (ret)
			return ret;

		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		ret = sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		if (ret)
			return ret;

		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	/* writing DAR starts processing of the descriptor chain */
	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));

	/*
	 * NOTE(review): sg_in_idx is the descriptor index used for the data
	 * links, which is 0 for a first-request pass — in that case mapped
	 * input sgs are never unmapped here. Confirm whether this is a
	 * missed dma_unmap_sg().
	 */
	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result && rctx->last)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}
 985
 986static int sahara_do_one_request(struct crypto_engine *engine, void *areq)
 987{
 988	struct crypto_async_request *async_req = areq;
 989	int err;
 
 
 990
 991	if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_AHASH) {
 992		struct ahash_request *req = ahash_request_cast(async_req);
 993
 994		err = sahara_sha_process(req);
 995		local_bh_disable();
 996		crypto_finalize_hash_request(engine, req, err);
 997		local_bh_enable();
 998	} else {
 999		struct skcipher_request *req = skcipher_request_cast(async_req);
1000
1001		err = sahara_aes_process(skcipher_request_cast(async_req));
1002		local_bh_disable();
1003		crypto_finalize_skcipher_request(engine, req, err);
1004		local_bh_enable();
1005	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1006
1007	return 0;
1008}
1009
/*
 * Queue a hash request on the crypto engine.
 *
 * @last: nonzero for the final transfer (final/finup paths).
 * An empty, non-final update is a no-op and returns 0 immediately.
 */
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;

	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	return crypto_transfer_hash_request_to_engine(dev->engine, req);
}
1022
1023static int sahara_sha_init(struct ahash_request *req)
1024{
1025	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1026	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1027
1028	memset(rctx, 0, sizeof(*rctx));
1029
1030	switch (crypto_ahash_digestsize(tfm)) {
1031	case SHA1_DIGEST_SIZE:
1032		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1033		rctx->digest_size = SHA1_DIGEST_SIZE;
1034		break;
1035	case SHA256_DIGEST_SIZE:
1036		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1037		rctx->digest_size = SHA256_DIGEST_SIZE;
1038		break;
1039	default:
1040		return -EINVAL;
1041	}
1042
1043	rctx->context_size = rctx->digest_size + 4;
1044	rctx->first = 1;
1045
1046	return 0;
1047}
1048
/* Non-final update: queue data without closing the digest. */
static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}
1053
/* Finalize with no new data: zero nbytes and flag the last block. */
static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}
1059
/* Final update: hash the remaining data and close the digest. */
static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}
1064
/*
 * One-shot digest: init followed by finup.
 *
 * Fix: propagate a failure from sahara_sha_init() (it returns -EINVAL
 * for an unsupported digest size) instead of silently continuing with
 * an uninitialised request context.
 */
static int sahara_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = sahara_sha_init(req);
	if (ret)
		return ret;

	return sahara_sha_finup(req);
}
1071
1072static int sahara_sha_export(struct ahash_request *req, void *out)
1073{
1074	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1075
1076	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1077
1078	return 0;
1079}
1080
1081static int sahara_sha_import(struct ahash_request *req, const void *in)
1082{
1083	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1084
1085	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1086
1087	return 0;
1088}
1089
/* tfm constructor: reserve per-request room for our SHA context. */
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx));

	return 0;
}
1097
/*
 * ECB and CBC AES skciphers backed by the SAHARA engine.  Both declare
 * CRYPTO_ALG_NEED_FALLBACK because the hardware only handles 128-bit
 * keys (see sahara_aes_setkey()).
 */
static struct skcipher_engine_alg aes_algs[] = {
{
	.base = {
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "sahara-ecb-aes",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct sahara_ctx),
		.base.cra_alignmask	= 0x0,
		.base.cra_module	= THIS_MODULE,

		.init			= sahara_aes_init_tfm,
		.exit			= sahara_aes_exit_tfm,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.setkey			= sahara_aes_setkey,
		.encrypt		= sahara_aes_ecb_encrypt,
		.decrypt		= sahara_aes_ecb_decrypt,
	},
	.op = {
		.do_one_request = sahara_do_one_request,
	},
}, {
	.base = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "sahara-cbc-aes",
		.base.cra_priority	= 300,
		.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct sahara_ctx),
		.base.cra_alignmask	= 0x0,
		.base.cra_module	= THIS_MODULE,

		.init			= sahara_aes_init_tfm,
		.exit			= sahara_aes_exit_tfm,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= sahara_aes_setkey,
		.encrypt		= sahara_aes_cbc_encrypt,
		.decrypt		= sahara_aes_cbc_decrypt,
	},
	.op = {
		.do_one_request = sahara_do_one_request,
	},
}
};
1146
/* SHA-1, available on all supported SAHARA versions. */
static struct ahash_engine_alg sha_v3_algs[] = {
{
	.base = {
		.init		= sahara_sha_init,
		.update		= sahara_sha_update,
		.final		= sahara_sha_final,
		.finup		= sahara_sha_finup,
		.digest		= sahara_sha_digest,
		.export		= sahara_sha_export,
		.import		= sahara_sha_import,
		.halg.digestsize	= SHA1_DIGEST_SIZE,
		.halg.statesize         = sizeof(struct sahara_sha_reqctx),
		.halg.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "sahara-sha1",
			.cra_priority		= 300,
			.cra_flags		= CRYPTO_ALG_ASYNC |
							CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sahara_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sahara_sha_cra_init,
		}
	},
	.op = {
		.do_one_request = sahara_do_one_request,
	},
},
};
1177
/* SHA-256, only registered on version-4 hardware (see sahara_register_algs()). */
static struct ahash_engine_alg sha_v4_algs[] = {
{
	.base = {
		.init		= sahara_sha_init,
		.update		= sahara_sha_update,
		.final		= sahara_sha_final,
		.finup		= sahara_sha_finup,
		.digest		= sahara_sha_digest,
		.export		= sahara_sha_export,
		.import		= sahara_sha_import,
		.halg.digestsize	= SHA256_DIGEST_SIZE,
		.halg.statesize         = sizeof(struct sahara_sha_reqctx),
		.halg.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "sahara-sha256",
			.cra_priority		= 300,
			.cra_flags		= CRYPTO_ALG_ASYNC |
							CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct sahara_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= sahara_sha_cra_init,
		}
	},
	.op = {
		.do_one_request = sahara_do_one_request,
	},
},
};
1208
/*
 * Interrupt handler: acknowledge the IRQ, decode the hardware status
 * and, unless the unit is still busy, complete the pending transfer.
 */
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	/* ack: clear both the interrupt and any latched error condition */
	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	/* still processing - this completion is not ours */
	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY)
		return IRQ_NONE;

	/* anything other than COMPLETE is an error worth decoding */
	if (SAHARA_STATUS_GET_STATE(stat) != SAHARA_STATE_COMPLETE)
		sahara_decode_error(dev, err);

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}
1230
1231
1232static int sahara_register_algs(struct sahara_dev *dev)
1233{
1234	int err;
 
1235
1236	err = crypto_engine_register_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
1237	if (err)
1238		return err;
1239
1240	err = crypto_engine_register_ahashes(sha_v3_algs,
1241					     ARRAY_SIZE(sha_v3_algs));
1242	if (err)
1243		goto err_aes_algs;
1244
1245	if (dev->version > SAHARA_VERSION_3) {
1246		err = crypto_engine_register_ahashes(sha_v4_algs,
1247						     ARRAY_SIZE(sha_v4_algs));
1248		if (err)
1249			goto err_sha_v3_algs;
1250	}
1251
 
 
 
 
 
 
 
1252	return 0;
1253
 
 
 
 
1254err_sha_v3_algs:
1255	crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));
 
1256
1257err_aes_algs:
1258	crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
 
1259
1260	return err;
1261}
1262
/* Remove every algorithm that sahara_register_algs() exposed. */
static void sahara_unregister_algs(struct sahara_dev *dev)
{
	crypto_engine_unregister_skciphers(aes_algs, ARRAY_SIZE(aes_algs));
	crypto_engine_unregister_ahashes(sha_v3_algs, ARRAY_SIZE(sha_v3_algs));

	/* SHA-256 was only registered on version-4 hardware */
	if (dev->version > SAHARA_VERSION_3)
		crypto_engine_unregister_ahashes(sha_v4_algs,
						 ARRAY_SIZE(sha_v4_algs));
}
1272
/* Devicetree match table: i.MX53 (SAHARA v4) and i.MX27 (SAHARA v3). */
static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1279
/*
 * Probe: map registers, request the IRQ and clocks, allocate the
 * DMA-coherent descriptor/link/key/context buffers, start a crypto
 * engine, verify the hardware version against the DT compatible and
 * finally register the algorithms.
 *
 * All allocations and the IRQ are devm/dmam managed, so early exits
 * need no manual cleanup; only the crypto engine must be torn down
 * explicitly on the error path.
 */
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev,  0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "failed to request irq\n");

	/* clocks */
	dev->clk_ipg = devm_clk_get_enabled(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg))
		return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ipg),
				     "Could not get ipg clock\n");

	dev->clk_ahb = devm_clk_get_enabled(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb))
		return dev_err_probe(&pdev->dev, PTR_ERR(dev->clk_ahb),
				     "Could not get ahb clock\n");

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0])
		return -ENOMEM;
	/* both descriptors live in one coherent allocation */
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base)
		return -ENOMEM;
	/* IV buffer follows the key in the same allocation */
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base)
		return -ENOMEM;

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0])
		return -ENOMEM;
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	dev_ptr = dev;

	dev->engine = crypto_engine_alloc_init(&pdev->dev, true);
	if (!dev->engine)
		return -ENOMEM;

	err = crypto_engine_start(dev->engine);
	if (err) {
		crypto_engine_exit(dev->engine);
		return dev_err_probe(&pdev->dev, err,
				     "Could not start crypto engine\n");
	}

	init_completion(&dev->dma_completion);

	/*
	 * err is 0 here (crypto_engine_start succeeded); the version
	 * checks below may set it to -ENODEV.
	 */
	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		/* on i.MX53 the version lives in bits 15:8 */
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err_probe(&pdev->dev, err,
			      "SAHARA version %d not supported\n", version);
		goto err_algs;
	}

	dev->version = version;

	/* reset the unit and enable batch mode + completion interrupt */
	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	crypto_engine_exit(dev->engine);

	return err;
}
1413
/*
 * Teardown: stop the crypto engine and drop the registered algorithms.
 * Memory, clocks and the IRQ are devm/dmam managed (see probe), so
 * nothing else needs freeing here.
 *
 * NOTE(review): the engine is exited while the algorithms are still
 * registered; a request submitted in that window would land on a
 * stopped engine.  Unregistering first looks safer -- confirm against
 * the crypto-engine lifetime rules before reordering.
 */
static void sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	crypto_engine_exit(dev->engine);
	sahara_unregister_algs(dev);
}
1421
/* Platform driver glue; probing is driven by the devicetree match table. */
static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove_new	= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
/* ===== v4.10.11: an older capture of this driver follows (web-export artifact) ===== */
 
   1/*
   2 * Cryptographic API.
   3 *
   4 * Support for SAHARA cryptographic accelerator.
   5 *
   6 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
   7 * Copyright (c) 2013 Vista Silicon S.L.
   8 * Author: Javier Martin <javier.martin@vista-silicon.com>
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License version 2 as published
  12 * by the Free Software Foundation.
  13 *
  14 * Based on omap-aes.c and tegra-aes.c
  15 */
  16
  17#include <crypto/aes.h>
  18#include <crypto/internal/hash.h>
  19#include <crypto/internal/skcipher.h>
  20#include <crypto/scatterwalk.h>
  21#include <crypto/sha.h>
 
 
  22
  23#include <linux/clk.h>
  24#include <linux/crypto.h>
  25#include <linux/interrupt.h>
  26#include <linux/io.h>
  27#include <linux/irq.h>
  28#include <linux/kernel.h>
  29#include <linux/kthread.h>
  30#include <linux/module.h>
  31#include <linux/mutex.h>
  32#include <linux/of.h>
  33#include <linux/of_device.h>
  34#include <linux/platform_device.h>
 
  35
  36#define SHA_BUFFER_LEN		PAGE_SIZE
  37#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
  38
  39#define SAHARA_NAME "sahara"
  40#define SAHARA_VERSION_3	3
  41#define SAHARA_VERSION_4	4
  42#define SAHARA_TIMEOUT_MS	1000
  43#define SAHARA_MAX_HW_DESC	2
  44#define SAHARA_MAX_HW_LINK	20
  45
  46#define FLAGS_MODE_MASK		0x000f
  47#define FLAGS_ENCRYPT		BIT(0)
  48#define FLAGS_CBC		BIT(1)
  49#define FLAGS_NEW_KEY		BIT(3)
  50
  51#define SAHARA_HDR_BASE			0x00800000
  52#define SAHARA_HDR_SKHA_ALG_AES	0
  53#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
  54#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
  55#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
  56#define SAHARA_HDR_FORM_DATA		(5 << 16)
  57#define SAHARA_HDR_FORM_KEY		(8 << 16)
  58#define SAHARA_HDR_LLO			(1 << 24)
  59#define SAHARA_HDR_CHA_SKHA		(1 << 28)
  60#define SAHARA_HDR_CHA_MDHA		(2 << 28)
  61#define SAHARA_HDR_PARITY_BIT		(1 << 31)
  62
  63#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
  64#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
  65#define SAHARA_HDR_MDHA_HASH		0xA0850000
  66#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
  67#define SAHARA_HDR_MDHA_ALG_SHA1	0
  68#define SAHARA_HDR_MDHA_ALG_MD5		1
  69#define SAHARA_HDR_MDHA_ALG_SHA256	2
  70#define SAHARA_HDR_MDHA_ALG_SHA224	3
  71#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
  72#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
  73#define SAHARA_HDR_MDHA_INIT		(1 << 5)
  74#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
  75#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
  76#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
  77#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
  78#define SAHARA_HDR_MDHA_SSL		(1 << 10)
  79
  80/* SAHARA can only process one request at a time */
  81#define SAHARA_QUEUE_LENGTH	1
  82
  83#define SAHARA_REG_VERSION	0x00
  84#define SAHARA_REG_DAR		0x04
  85#define SAHARA_REG_CONTROL	0x08
  86#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
  87#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
  88#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
  89#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
  90#define SAHARA_REG_CMD		0x0C
  91#define		SAHARA_CMD_RESET		(1 << 0)
  92#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
  93#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
  94#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
  95#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
  96#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
  97#define	SAHARA_REG_STATUS	0x10
  98#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
  99#define			SAHARA_STATE_IDLE	0
 100#define			SAHARA_STATE_BUSY	1
 101#define			SAHARA_STATE_ERR	2
 102#define			SAHARA_STATE_FAULT	3
 103#define			SAHARA_STATE_COMPLETE	4
 104#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
 105#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
 106#define		SAHARA_STATUS_ERROR		(1 << 4)
 107#define		SAHARA_STATUS_SECURE		(1 << 5)
 108#define		SAHARA_STATUS_FAIL		(1 << 6)
 109#define		SAHARA_STATUS_INIT		(1 << 7)
 110#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
 111#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
 112#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
 113#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
 114#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
 115#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
 116#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
 117#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
 118#define SAHARA_REG_ERRSTATUS	0x14
 119#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
 120#define			SAHARA_ERRSOURCE_CHA	14
 121#define			SAHARA_ERRSOURCE_DMA	15
 122#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
 123#define		SAHARA_ERRSTATUS_GET_DMASZ(x)(((x) >> 9) & 0x3)
 124#define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
 125#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
 126#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
 127#define SAHARA_REG_FADDR	0x18
 128#define SAHARA_REG_CDAR		0x1C
 129#define SAHARA_REG_IDAR		0x20
 130
/* Hardware descriptor: two buffer pointers plus a link to the next desc. */
struct sahara_hw_desc {
	u32	hdr;	/* operation header (SAHARA_HDR_*) */
	u32	len1;	/* length of first buffer */
	u32	p1;	/* phys addr of first buffer or link chain */
	u32	len2;	/* length of second buffer */
	u32	p2;	/* phys addr of second buffer or link chain */
	u32	next;	/* phys addr of next descriptor, 0 terminates */
};
 139
/* Hardware scatter link: one DMA segment in a chained list. */
struct sahara_hw_link {
	u32	len;	/* segment length in bytes */
	u32	p;	/* segment physical address */
	u32	next;	/* phys addr of next link, 0 terminates */
};
 145
/* Per-tfm state. */
struct sahara_ctx {
	unsigned long flags;	/* FLAGS_NEW_KEY etc. */

	/* AES-specific context */
	int keylen;		/* key length in bytes; only 128-bit runs on hw */
	u8 key[AES_KEYSIZE_128];
	struct crypto_skcipher *fallback;	/* sw cipher for 192/256-bit keys */
};
 154
/* Per-request AES state: FLAGS_ENCRYPT / FLAGS_CBC mode bits. */
struct sahara_aes_reqctx {
	unsigned long mode;
};
 158
 159/*
 160 * struct sahara_sha_reqctx - private data per request
 161 * @buf: holds data for requests smaller than block_size
 162 * @rembuf: used to prepare one block_size-aligned request
 163 * @context: hw-specific context for request. Digest is extracted from this
 164 * @mode: specifies what type of hw-descriptor needs to be built
 165 * @digest_size: length of digest for this request
 166 * @context_size: length of hw-context for this request.
 167 *                Always digest_size + 4
 168 * @buf_cnt: number of bytes saved in buf
 169 * @sg_in_idx: number of hw links
 170 * @in_sg: scatterlist for input data
 171 * @in_sg_chain: scatterlists for chained input data
 172 * @total: total number of bytes for transfer
 173 * @last: is this the last block
 174 * @first: is this the first block
 175 * @active: inside a transfer
 176 */
 177struct sahara_sha_reqctx {
 178	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
 179	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
 180	u8			context[SHA256_DIGEST_SIZE + 4];
 181	unsigned int		mode;
 182	unsigned int		digest_size;
 183	unsigned int		context_size;
 184	unsigned int		buf_cnt;
 185	unsigned int		sg_in_idx;
 186	struct scatterlist	*in_sg;
 187	struct scatterlist	in_sg_chain[2];
 188	size_t			total;
 189	unsigned int		last;
 190	unsigned int		first;
 191	unsigned int		active;
 192};
 193
/* Driver state for the single SAHARA unit. */
struct sahara_dev {
	struct device		*device;
	unsigned int		version;	/* SAHARA_VERSION_3 or _4 */
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;	/* serialises access to 'queue' */
	struct task_struct	*kthread;	/* request dispatcher thread */
	struct completion	dma_completion;	/* waited on in *_process() */

	struct sahara_ctx	*ctx;		/* ctx of request being processed */
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;		/* current mode (FLAGS_*) */

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;	/* DMA buffer for the AES key */
	dma_addr_t		key_phys_base;

	u8			*iv_base;	/* DMA buffer for the CBC IV */
	dma_addr_t		iv_phys_base;

	u8			*context_base;	/* DMA buffer for the hash context */
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;		/* bytes in current transfer */
	struct scatterlist	*in_sg;
	int		nb_in_sg;
	struct scatterlist	*out_sg;
	int		nb_out_sg;

	u32			error;
};
 232
/* Singleton device pointer, set at probe time. */
static struct sahara_dev *dev_ptr;
 234
/* MMIO register write helper. */
static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}
 239
/* MMIO register read helper. */
static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}
 244
/*
 * Build the descriptor header for the key/IV-load operation.  Each mode
 * flag that is ORed in also toggles SAHARA_HDR_PARITY_BIT, presumably to
 * keep the header's parity valid -- confirm against the SAHARA datasheet.
 */
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}
 263
/* Descriptor header for the data-processing (scatter-link) descriptor. */
static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}
 269
/* Human-readable decodings of the error-status register fields. */
static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

/* indexed by ffs() of the CHASRC bitfield, see sahara_decode_error() */
static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
 323
/*
 * Print a decoded view of the error-status register.
 *
 * NOTE(review): chasrc = ffs(CHASRC field) can be 0..12, but
 * sahara_cha_errsrc[] has only 12 entries (indices 0..11); a set bit 12
 * would read out of bounds -- verify against the CHA error encoding.
 */
static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, "		* DMA read.\n");
		else
			dev_err(dev->device, "		* DMA write.\n");

		dev_err(dev->device, "		* %s.\n",
		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, "		* %s.\n",
		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, "		* %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, "		* %s.\n",
		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}
 351
static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

/* Verbose dump of the status register; compiled out unless DEBUG. */
static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, "	- State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, "		* %s.\n",
	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, "	- DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, "	- Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, "	- Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, "	- Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, "	- RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, "	- MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, "	- SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, "	- Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, "	- Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, "	- Debug Mode.\n");

	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
	       SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}
 405
/* Debug dump of both hardware descriptors (no-op unless DEBUG). */
static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
 426
/* Debug dump of the hardware scatter-link table (no-op unless DEBUG). */
static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
 444
 445static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 446{
 447	struct sahara_ctx *ctx = dev->ctx;
 448	struct scatterlist *sg;
 449	int ret;
 450	int i, j;
 451	int idx = 0;
 
 
 
 452
 453	/* Copy new key if necessary */
 454	if (ctx->flags & FLAGS_NEW_KEY) {
 455		memcpy(dev->key_base, ctx->key, ctx->keylen);
 456		ctx->flags &= ~FLAGS_NEW_KEY;
 457
 458		if (dev->flags & FLAGS_CBC) {
 459			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
 460			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
 461		} else {
 462			dev->hw_desc[idx]->len1 = 0;
 463			dev->hw_desc[idx]->p1 = 0;
 464		}
 465		dev->hw_desc[idx]->len2 = ctx->keylen;
 466		dev->hw_desc[idx]->p2 = dev->key_phys_base;
 467		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
 468
 469		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
 470
 471		idx++;
 472	}
 473
 474	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
 475	if (dev->nb_in_sg < 0) {
 476		dev_err(dev->device, "Invalid numbers of src SG.\n");
 477		return dev->nb_in_sg;
 478	}
 479	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
 480	if (dev->nb_out_sg < 0) {
 481		dev_err(dev->device, "Invalid numbers of dst SG.\n");
 482		return dev->nb_out_sg;
 483	}
 484	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
 485		dev_err(dev->device, "not enough hw links (%d)\n",
 486			dev->nb_in_sg + dev->nb_out_sg);
 487		return -EINVAL;
 488	}
 489
 490	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 491			 DMA_TO_DEVICE);
 492	if (ret != dev->nb_in_sg) {
 493		dev_err(dev->device, "couldn't map in sg\n");
 494		goto unmap_in;
 495	}
 
 496	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 497			 DMA_FROM_DEVICE);
 498	if (ret != dev->nb_out_sg) {
 499		dev_err(dev->device, "couldn't map out sg\n");
 500		goto unmap_out;
 501	}
 502
 503	/* Create input links */
 504	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
 505	sg = dev->in_sg;
 
 506	for (i = 0; i < dev->nb_in_sg; i++) {
 507		dev->hw_link[i]->len = sg->length;
 508		dev->hw_link[i]->p = sg->dma_address;
 509		if (i == (dev->nb_in_sg - 1)) {
 510			dev->hw_link[i]->next = 0;
 511		} else {
 
 512			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 513			sg = sg_next(sg);
 514		}
 515	}
 516
 517	/* Create output links */
 518	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
 519	sg = dev->out_sg;
 
 520	for (j = i; j < dev->nb_out_sg + i; j++) {
 521		dev->hw_link[j]->len = sg->length;
 522		dev->hw_link[j]->p = sg->dma_address;
 523		if (j == (dev->nb_out_sg + i - 1)) {
 524			dev->hw_link[j]->next = 0;
 525		} else {
 
 526			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
 527			sg = sg_next(sg);
 528		}
 529	}
 530
 531	/* Fill remaining fields of hw_desc[1] */
 532	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
 533	dev->hw_desc[idx]->len1 = dev->total;
 534	dev->hw_desc[idx]->len2 = dev->total;
 535	dev->hw_desc[idx]->next = 0;
 536
 537	sahara_dump_descriptors(dev);
 538	sahara_dump_links(dev);
 539
 540	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
 541
 542	return 0;
 543
 544unmap_out:
 545	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 546		DMA_TO_DEVICE);
 547unmap_in:
 548	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 549		DMA_FROM_DEVICE);
 550
 551	return -EINVAL;
 552}
 553
 554static int sahara_aes_process(struct ablkcipher_request *req)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 555{
 
 556	struct sahara_dev *dev = dev_ptr;
 557	struct sahara_ctx *ctx;
 558	struct sahara_aes_reqctx *rctx;
 559	int ret;
 560	unsigned long timeout;
 561
 562	/* Request is ready to be dispatched by the device */
 563	dev_dbg(dev->device,
 564		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
 565		req->nbytes, req->src, req->dst);
 566
 567	/* assign new request to device */
 568	dev->total = req->nbytes;
 569	dev->in_sg = req->src;
 570	dev->out_sg = req->dst;
 571
 572	rctx = ablkcipher_request_ctx(req);
 573	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
 574	rctx->mode &= FLAGS_MODE_MASK;
 575	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 576
 577	if ((dev->flags & FLAGS_CBC) && req->info)
 578		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);
 
 
 
 
 
 
 
 
 
 579
 580	/* assign new context to device */
 581	dev->ctx = ctx;
 582
 583	reinit_completion(&dev->dma_completion);
 584
 585	ret = sahara_hw_descriptor_create(dev);
 586	if (ret)
 587		return -EINVAL;
 588
 589	timeout = wait_for_completion_timeout(&dev->dma_completion,
 590				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
 
 
 
 
 
 
 591	if (!timeout) {
 592		dev_err(dev->device, "AES timeout\n");
 593		return -ETIMEDOUT;
 594	}
 595
 596	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 597		DMA_TO_DEVICE);
 598	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 599		DMA_FROM_DEVICE);
 600
 601	return 0;
 602}
 603
 604static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 605			     unsigned int keylen)
 606{
 607	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 608	int ret;
 609
 610	ctx->keylen = keylen;
 611
 612	/* SAHARA only supports 128bit keys */
 613	if (keylen == AES_KEYSIZE_128) {
 614		memcpy(ctx->key, key, keylen);
 615		ctx->flags |= FLAGS_NEW_KEY;
 616		return 0;
 617	}
 618
 619	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
 620		return -EINVAL;
 621
 622	/*
 623	 * The requested key size is not supported by HW, do a fallback.
 624	 */
 625	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
 626	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
 627						 CRYPTO_TFM_REQ_MASK);
 
 
 628
 629	ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
 
 
 
 
 630
 631	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
 632	tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
 633			       CRYPTO_TFM_RES_MASK;
 634	return ret;
 
 
 
 
 
 
 
 
 635}
 636
 637static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 638{
 639	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 
 
 640	struct sahara_dev *dev = dev_ptr;
 641	int err = 0;
 
 
 
 
 
 642
 643	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
 644		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
 645
 646	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
 647		dev_err(dev->device,
 648			"request size is not exact amount of AES blocks\n");
 649		return -EINVAL;
 650	}
 651
 652	rctx->mode = mode;
 653
 654	mutex_lock(&dev->queue_mutex);
 655	err = ablkcipher_enqueue_request(&dev->queue, req);
 656	mutex_unlock(&dev->queue_mutex);
 657
 658	wake_up_process(dev->kthread);
 659
 660	return err;
 661}
 662
 663static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
 664{
 665	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 666		crypto_ablkcipher_reqtfm(req));
 667	int err;
 668
 669	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 670		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 671
 672		skcipher_request_set_tfm(subreq, ctx->fallback);
 673		skcipher_request_set_callback(subreq, req->base.flags,
 674					      NULL, NULL);
 675		skcipher_request_set_crypt(subreq, req->src, req->dst,
 676					   req->nbytes, req->info);
 677		err = crypto_skcipher_encrypt(subreq);
 678		skcipher_request_zero(subreq);
 679		return err;
 680	}
 681
 682	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
 683}
 684
 685static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
 686{
 687	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 688		crypto_ablkcipher_reqtfm(req));
 689	int err;
 690
 691	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 692		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 693
 694		skcipher_request_set_tfm(subreq, ctx->fallback);
 695		skcipher_request_set_callback(subreq, req->base.flags,
 696					      NULL, NULL);
 697		skcipher_request_set_crypt(subreq, req->src, req->dst,
 698					   req->nbytes, req->info);
 699		err = crypto_skcipher_decrypt(subreq);
 700		skcipher_request_zero(subreq);
 701		return err;
 702	}
 703
 704	return sahara_aes_crypt(req, 0);
 705}
 706
 707static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
 708{
 709	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 710		crypto_ablkcipher_reqtfm(req));
 711	int err;
 712
 713	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 714		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 715
 716		skcipher_request_set_tfm(subreq, ctx->fallback);
 717		skcipher_request_set_callback(subreq, req->base.flags,
 718					      NULL, NULL);
 719		skcipher_request_set_crypt(subreq, req->src, req->dst,
 720					   req->nbytes, req->info);
 721		err = crypto_skcipher_encrypt(subreq);
 722		skcipher_request_zero(subreq);
 723		return err;
 724	}
 725
 726	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
 727}
 728
 729static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
 730{
 731	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 732		crypto_ablkcipher_reqtfm(req));
 733	int err;
 734
 735	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 736		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 737
 738		skcipher_request_set_tfm(subreq, ctx->fallback);
 739		skcipher_request_set_callback(subreq, req->base.flags,
 740					      NULL, NULL);
 741		skcipher_request_set_crypt(subreq, req->src, req->dst,
 742					   req->nbytes, req->info);
 743		err = crypto_skcipher_decrypt(subreq);
 744		skcipher_request_zero(subreq);
 745		return err;
 746	}
 747
 748	return sahara_aes_crypt(req, FLAGS_CBC);
 749}
 750
 751static int sahara_aes_cra_init(struct crypto_tfm *tfm)
 752{
 753	const char *name = crypto_tfm_alg_name(tfm);
 754	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
 755
 756	ctx->fallback = crypto_alloc_skcipher(name, 0,
 757					      CRYPTO_ALG_ASYNC |
 758					      CRYPTO_ALG_NEED_FALLBACK);
 759	if (IS_ERR(ctx->fallback)) {
 760		pr_err("Error allocating fallback algo %s\n", name);
 761		return PTR_ERR(ctx->fallback);
 762	}
 763
 764	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
 
 765
 766	return 0;
 767}
 768
/* Per-tfm teardown: release the fallback allocated in cra_init. */
static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(ctx->fallback);
}
 775
 776static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
 777			      struct sahara_sha_reqctx *rctx)
 778{
 779	u32 hdr = 0;
 780
 781	hdr = rctx->mode;
 782
 783	if (rctx->first) {
 784		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
 785		hdr |= SAHARA_HDR_MDHA_INIT;
 786	} else {
 787		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
 788	}
 789
 790	if (rctx->last)
 791		hdr |= SAHARA_HDR_MDHA_PDATA;
 792
 793	if (hweight_long(hdr) % 2 == 0)
 794		hdr |= SAHARA_HDR_PARITY_BIT;
 795
 796	return hdr;
 797}
 798
 799static int sahara_sha_hw_links_create(struct sahara_dev *dev,
 800				       struct sahara_sha_reqctx *rctx,
 801				       int start)
 802{
 803	struct scatterlist *sg;
 
 804	unsigned int i;
 805	int ret;
 806
 807	dev->in_sg = rctx->in_sg;
 808
 809	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
 810	if (dev->nb_in_sg < 0) {
 811		dev_err(dev->device, "Invalid numbers of src SG.\n");
 812		return dev->nb_in_sg;
 813	}
 814	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
 815		dev_err(dev->device, "not enough hw links (%d)\n",
 816			dev->nb_in_sg + dev->nb_out_sg);
 817		return -EINVAL;
 818	}
 819
 820	sg = dev->in_sg;
 821	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
 822	if (!ret)
 823		return -EFAULT;
 824
 
 825	for (i = start; i < dev->nb_in_sg + start; i++) {
 826		dev->hw_link[i]->len = sg->length;
 827		dev->hw_link[i]->p = sg->dma_address;
 828		if (i == (dev->nb_in_sg + start - 1)) {
 829			dev->hw_link[i]->next = 0;
 830		} else {
 
 831			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 832			sg = sg_next(sg);
 833		}
 834	}
 835
 836	return i;
 837}
 838
/*
 * Build the MDHA data-processing descriptor at @index.
 *
 * len1/p1 describe the message data (via the HW link chain); len2/p2
 * point at a final link covering the context area where the hardware
 * stores the intermediate digest for the next operation.
 * Returns 0 on success or a negative errno from link creation.
 */
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8*/
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		/* remember which link to unmap after completion */
		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	/* i is now the first free link; use it for the context output. */
	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}
 882
 883/*
 884 * Load descriptor aka #6
 885 *
 886 * To load a previously saved context back to the MDHA unit
 887 *
 888 * p1: Saved Context
 889 * p2: NULL
 890 *
 891 */
 892static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
 893						struct sahara_sha_reqctx *rctx,
 894						struct ahash_request *req,
 895						int index)
 896{
 897	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
 898
 899	dev->hw_desc[index]->len1 = rctx->context_size;
 900	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
 901	dev->hw_desc[index]->len2 = 0;
 902	dev->hw_desc[index]->p2 = 0;
 903
 904	dev->hw_link[index]->len = rctx->context_size;
 905	dev->hw_link[index]->p = dev->context_phys_base;
 906	dev->hw_link[index]->next = 0;
 907
 908	return 0;
 909}
 910
 911static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
 912{
 913	if (!sg || !sg->length)
 914		return nbytes;
 915
 916	while (nbytes && sg) {
 917		if (nbytes <= sg->length) {
 918			sg->length = nbytes;
 919			sg_mark_end(sg);
 920			break;
 921		}
 922		nbytes -= sg->length;
 923		sg = sg_next(sg);
 924	}
 925
 926	return nbytes;
 927}
 928
/*
 * Prepare the input for one hash operation, buffering partial blocks.
 *
 * Returns 0 if the data was merely buffered (nothing to submit to the
 * hardware) and -EINPROGRESS if rctx->in_sg/rctx->total now describe a
 * block-multiple of data ready for processing.
 */
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* to few data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	/* truncate req->src so it covers exactly req->nbytes */
	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		/* chain the carried-over bytes in front of the new data */
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}
1002
1003static int sahara_sha_process(struct ahash_request *req)
1004{
1005	struct sahara_dev *dev = dev_ptr;
1006	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1007	int ret;
1008	unsigned long timeout;
1009
1010	ret = sahara_sha_prepare_request(req);
1011	if (!ret)
1012		return ret;
1013
1014	if (rctx->first) {
1015		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
 
 
 
1016		dev->hw_desc[0]->next = 0;
1017		rctx->first = 0;
1018	} else {
1019		memcpy(dev->context_base, rctx->context, rctx->context_size);
1020
1021		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1022		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1023		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
 
 
 
1024		dev->hw_desc[1]->next = 0;
1025	}
1026
1027	sahara_dump_descriptors(dev);
1028	sahara_dump_links(dev);
1029
1030	reinit_completion(&dev->dma_completion);
1031
1032	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1033
1034	timeout = wait_for_completion_timeout(&dev->dma_completion,
1035				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
 
 
 
 
 
1036	if (!timeout) {
1037		dev_err(dev->device, "SHA timeout\n");
1038		return -ETIMEDOUT;
1039	}
1040
1041	if (rctx->sg_in_idx)
1042		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1043			     DMA_TO_DEVICE);
1044
1045	memcpy(rctx->context, dev->context_base, rctx->context_size);
1046
1047	if (req->result)
1048		memcpy(req->result, rctx->context, rctx->digest_size);
1049
1050	return 0;
1051}
1052
/*
 * Dispatcher kthread: pulls requests off the shared queue, runs each
 * one synchronously on the hardware, completes it, and sleeps when the
 * queue is empty (woken by the enqueue paths).
 */
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		/* Set state before checking the queue to avoid lost wakeups. */
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		/* Notify a backlogged submitter its request is now queued. */
		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			/* More work may be queued; don't sleep yet. */
			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
1095
/*
 * Queue a hash request for the dispatcher thread.
 * @last: non-zero for final/finup, lets the hardware apply padding.
 */
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	/* An empty non-final update is a no-op. */
	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	/* First submission of this transform: flag it for descriptor setup. */
	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return ret;
}
1120
1121static int sahara_sha_init(struct ahash_request *req)
1122{
1123	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1124	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1125
1126	memset(rctx, 0, sizeof(*rctx));
1127
1128	switch (crypto_ahash_digestsize(tfm)) {
1129	case SHA1_DIGEST_SIZE:
1130		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1131		rctx->digest_size = SHA1_DIGEST_SIZE;
1132		break;
1133	case SHA256_DIGEST_SIZE:
1134		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1135		rctx->digest_size = SHA256_DIGEST_SIZE;
1136		break;
1137	default:
1138		return -EINVAL;
1139	}
1140
1141	rctx->context_size = rctx->digest_size + 4;
1142	rctx->active = 0;
1143
1144	return 0;
1145}
1146
/* Non-final update: enqueue without requesting hardware padding. */
static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}
1151
/* Final with no new data: submit only the buffered remainder. */
static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}
1157
/* Final update: enqueue the remaining data as the last transfer. */
static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}
1162
/*
 * One-shot digest: init followed by finup.  Unlike the previous code,
 * propagate an init failure (-EINVAL for unsupported digest sizes)
 * instead of hashing with an uninitialized request context.
 */
static int sahara_sha_digest(struct ahash_request *req)
{
	int ret;

	ret = sahara_sha_init(req);
	if (ret)
		return ret;

	return sahara_sha_finup(req);
}
1169
1170static int sahara_sha_export(struct ahash_request *req, void *out)
1171{
1172	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1173
1174	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1175
1176	return 0;
1177}
1178
1179static int sahara_sha_import(struct ahash_request *req, const void *in)
1180{
1181	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1182
1183	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1184
1185	return 0;
1186}
1187
/*
 * Reserve per-request space: the context struct plus buffer room for
 * partial blocks carried between updates.
 */
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}
1196
/*
 * AES algorithm descriptors (legacy ablkcipher interface).  Key sizes
 * other than 128 bit are routed to the software fallback by the
 * encrypt/decrypt handlers, hence CRYPTO_ALG_NEED_FALLBACK.
 */
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE ,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE ,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};
1241
/* SHA-1 descriptor; available on all SAHARA versions (v3 and later). */
static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};
1268
/* SHA-256 descriptor; only registered on SAHARA v4 hardware. */
static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};
1295
1296static irqreturn_t sahara_irq_handler(int irq, void *data)
1297{
1298	struct sahara_dev *dev = (struct sahara_dev *)data;
1299	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1300	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1301
1302	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1303		     SAHARA_REG_CMD);
1304
1305	sahara_decode_status(dev, stat);
1306
1307	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1308		return IRQ_NONE;
1309	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1310		dev->error = 0;
1311	} else {
1312		sahara_decode_error(dev, err);
1313		dev->error = -EINVAL;
1314	}
1315
1316	complete(&dev->dma_completion);
1317
1318	return IRQ_HANDLED;
1319}
1320
1321
1322static int sahara_register_algs(struct sahara_dev *dev)
1323{
1324	int err;
1325	unsigned int i, j, k, l;
1326
1327	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1328		INIT_LIST_HEAD(&aes_algs[i].cra_list);
1329		err = crypto_register_alg(&aes_algs[i]);
1330		if (err)
1331			goto err_aes_algs;
1332	}
 
 
1333
1334	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1335		err = crypto_register_ahash(&sha_v3_algs[k]);
 
1336		if (err)
1337			goto err_sha_v3_algs;
1338	}
1339
1340	if (dev->version > SAHARA_VERSION_3)
1341		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1342			err = crypto_register_ahash(&sha_v4_algs[l]);
1343			if (err)
1344				goto err_sha_v4_algs;
1345		}
1346
1347	return 0;
1348
1349err_sha_v4_algs:
1350	for (j = 0; j < l; j++)
1351		crypto_unregister_ahash(&sha_v4_algs[j]);
1352
1353err_sha_v3_algs:
1354	for (j = 0; j < k; j++)
1355		crypto_unregister_ahash(&sha_v4_algs[j]);
1356
1357err_aes_algs:
1358	for (j = 0; j < i; j++)
1359		crypto_unregister_alg(&aes_algs[j]);
1360
1361	return err;
1362}
1363
1364static void sahara_unregister_algs(struct sahara_dev *dev)
1365{
1366	unsigned int i;
1367
1368	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1369		crypto_unregister_alg(&aes_algs[i]);
1370
1371	for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1372		crypto_unregister_ahash(&sha_v3_algs[i]);
1373
1374	if (dev->version > SAHARA_VERSION_3)
1375		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1376			crypto_unregister_ahash(&sha_v4_algs[i]);
1377}
1378
1379static struct platform_device_id sahara_platform_ids[] = {
1380	{ .name = "sahara-imx27" },
1381	{ /* sentinel */ }
1382};
1383MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
1384
1385static struct of_device_id sahara_dt_ids[] = {
1386	{ .compatible = "fsl,imx53-sahara" },
1387	{ .compatible = "fsl,imx27-sahara" },
1388	{ /* sentinel */ }
1389};
1390MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1391
/*
 * Probe: map registers, request the IRQ, allocate all DMA-coherent
 * descriptor/link/key/context buffers, start the dispatcher kthread,
 * verify the hardware version and register the crypto algorithms.
 * All allocations are devm-/dmam-managed, so early error paths can
 * simply return.
 */
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev,  0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	/* Second descriptor lives right after the first in the same buffer. */
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	/* IV occupies the second half of the key buffer. */
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	/* Derive per-link virtual/physical addresses from the base. */
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	spin_lock_init(&dev->lock);
	mutex_init(&dev->queue_mutex);

	/* Single-instance device: publish it for the request paths. */
	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread)) {
		return PTR_ERR(dev->kthread);
	}

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

	/* Validate the silicon revision against the compatible string. */
	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
				version);
		goto err_algs;
	}

	dev->version = version;

	/* Reset the engine and enable interrupt-driven batch mode. */
	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}
1552
/* Remove: stop the dispatcher, drop the algorithms, gate the clocks. */
static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	/* Stop the kthread first so no request is in flight below. */
	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}
1568
/* Platform driver glue; matches both DT and legacy platform devices. */
static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};
1578
1579module_platform_driver(sahara_driver);
1580
1581MODULE_LICENSE("GPL");
1582MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1583MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1584MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");