drivers/crypto/sahara.c (Linux v5.9)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Cryptographic API.
   4 *
   5 * Support for SAHARA cryptographic accelerator.
   6 *
   7 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
   8 * Copyright (c) 2013 Vista Silicon S.L.
   9 * Author: Javier Martin <javier.martin@vista-silicon.com>
  10 *
  11 * Based on omap-aes.c and tegra-aes.c
  12 */
  13
  14#include <crypto/aes.h>
  15#include <crypto/internal/hash.h>
  16#include <crypto/internal/skcipher.h>
  17#include <crypto/scatterwalk.h>
  18#include <crypto/sha.h>
  19
  20#include <linux/clk.h>
  21#include <linux/crypto.h>
  22#include <linux/interrupt.h>
  23#include <linux/io.h>
  24#include <linux/irq.h>
  25#include <linux/kernel.h>
  26#include <linux/kthread.h>
  27#include <linux/module.h>
  28#include <linux/mutex.h>
  29#include <linux/of.h>
  30#include <linux/of_device.h>
  31#include <linux/platform_device.h>
  32
  33#define SHA_BUFFER_LEN		PAGE_SIZE
  34#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE
  35
  36#define SAHARA_NAME "sahara"
  37#define SAHARA_VERSION_3	3
  38#define SAHARA_VERSION_4	4
  39#define SAHARA_TIMEOUT_MS	1000
  40#define SAHARA_MAX_HW_DESC	2
  41#define SAHARA_MAX_HW_LINK	20
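/* one hardware link per scatterlist entry; AES requests consume input
 * plus output links, so at most 20 SG entries fit in one transfer */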
  42
  43#define FLAGS_MODE_MASK		0x000f
  44#define FLAGS_ENCRYPT		BIT(0)
  45#define FLAGS_CBC		BIT(1)
  46#define FLAGS_NEW_KEY		BIT(3)
  47
  48#define SAHARA_HDR_BASE			0x00800000
  49#define SAHARA_HDR_SKHA_ALG_AES	0
  50#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
  51#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
  52#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
  53#define SAHARA_HDR_FORM_DATA		(5 << 16)
  54#define SAHARA_HDR_FORM_KEY		(8 << 16)
  55#define SAHARA_HDR_LLO			(1 << 24)
  56#define SAHARA_HDR_CHA_SKHA		(1 << 28)
  57#define SAHARA_HDR_CHA_MDHA		(2 << 28)
  58#define SAHARA_HDR_PARITY_BIT		(1 << 31)
  59
  60#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
  61#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
  62#define SAHARA_HDR_MDHA_HASH		0xA0850000
  63#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
  64#define SAHARA_HDR_MDHA_ALG_SHA1	0
  65#define SAHARA_HDR_MDHA_ALG_MD5		1
  66#define SAHARA_HDR_MDHA_ALG_SHA256	2
  67#define SAHARA_HDR_MDHA_ALG_SHA224	3
  68#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
  69#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
  70#define SAHARA_HDR_MDHA_INIT		(1 << 5)
  71#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
  72#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
  73#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
  74#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
  75#define SAHARA_HDR_MDHA_SSL		(1 << 10)
  76
  77/* SAHARA can only process one request at a time */
  78#define SAHARA_QUEUE_LENGTH	1
  79
  80#define SAHARA_REG_VERSION	0x00
  81#define SAHARA_REG_DAR		0x04
  82#define SAHARA_REG_CONTROL	0x08
  83#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
  84#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
  85#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
  86#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
  87#define SAHARA_REG_CMD		0x0C
  88#define		SAHARA_CMD_RESET		(1 << 0)
  89#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
  90#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
  91#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
  92#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
  93#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
  94#define	SAHARA_REG_STATUS	0x10
  95#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
  96#define			SAHARA_STATE_IDLE	0
  97#define			SAHARA_STATE_BUSY	1
  98#define			SAHARA_STATE_ERR	2
  99#define			SAHARA_STATE_FAULT	3
 100#define			SAHARA_STATE_COMPLETE	4
 101#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
 102#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
 103#define		SAHARA_STATUS_ERROR		(1 << 4)
 104#define		SAHARA_STATUS_SECURE		(1 << 5)
 105#define		SAHARA_STATUS_FAIL		(1 << 6)
 106#define		SAHARA_STATUS_INIT		(1 << 7)
 107#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
 108#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
 109#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
 110#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
 111#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
 112#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
 113#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
 114#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
 115#define SAHARA_REG_ERRSTATUS	0x14
 116#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
 117#define			SAHARA_ERRSOURCE_CHA	14
 118#define			SAHARA_ERRSOURCE_DMA	15
 119#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
  120#define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
 121#define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
 122#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
 123#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
 124#define SAHARA_REG_FADDR	0x18
 125#define SAHARA_REG_CDAR		0x1C
 126#define SAHARA_REG_IDAR		0x20
 127
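/*
 * Hardware descriptor as consumed by the SAHARA DMA engine. Descriptors
 * live in DMA-coherent memory (allocated in sahara_probe) and are chained
 * through the 'next' physical address; p1/p2 hold physical pointers.
 */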
 128struct sahara_hw_desc {
 129	u32	hdr;
 130	u32	len1;
 131	u32	p1;
 132	u32	len2;
 133	u32	p2;
 134	u32	next;
 135};
 136
 137struct sahara_hw_link {
 138	u32	len;
 139	u32	p;
 140	u32	next;
 141};
 142
 143struct sahara_ctx {
 144	unsigned long flags;
 145
 146	/* AES-specific context */
 147	int keylen;
 148	u8 key[AES_KEYSIZE_128];
 149	struct crypto_skcipher *fallback;
 150};
 151
 152struct sahara_aes_reqctx {
 153	unsigned long mode;
 154	struct skcipher_request fallback_req;	// keep at the end
 155};
 156
 157/*
 158 * struct sahara_sha_reqctx - private data per request
 159 * @buf: holds data for requests smaller than block_size
 160 * @rembuf: used to prepare one block_size-aligned request
 161 * @context: hw-specific context for request. Digest is extracted from this
 162 * @mode: specifies what type of hw-descriptor needs to be built
 163 * @digest_size: length of digest for this request
 164 * @context_size: length of hw-context for this request.
 165 *                Always digest_size + 4
 166 * @buf_cnt: number of bytes saved in buf
 167 * @sg_in_idx: number of hw links
 168 * @in_sg: scatterlist for input data
 169 * @in_sg_chain: scatterlists for chained input data
 170 * @total: total number of bytes for transfer
 171 * @last: is this the last block
 172 * @first: is this the first block
 173 * @active: inside a transfer
 174 */
 175struct sahara_sha_reqctx {
 176	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
 177	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
 178	u8			context[SHA256_DIGEST_SIZE + 4];
 179	unsigned int		mode;
 180	unsigned int		digest_size;
 181	unsigned int		context_size;
 182	unsigned int		buf_cnt;
 183	unsigned int		sg_in_idx;
 184	struct scatterlist	*in_sg;
 185	struct scatterlist	in_sg_chain[2];
 186	size_t			total;
 187	unsigned int		last;
 188	unsigned int		first;
 189	unsigned int		active;
 190};
 191
 192struct sahara_dev {
 193	struct device		*device;
 194	unsigned int		version;
 195	void __iomem		*regs_base;
 196	struct clk		*clk_ipg;
 197	struct clk		*clk_ahb;
 198	struct mutex		queue_mutex;
 199	struct task_struct	*kthread;
 200	struct completion	dma_completion;
 201
 202	struct sahara_ctx	*ctx;
 203	struct crypto_queue	queue;
 204	unsigned long		flags;
 205
 206	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
 207	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
 208
 209	u8			*key_base;
 210	dma_addr_t		key_phys_base;
 211
 212	u8			*iv_base;
 213	dma_addr_t		iv_phys_base;
 214
 215	u8			*context_base;
 216	dma_addr_t		context_phys_base;
 217
 218	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
 219	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
 220
 221	size_t			total;
 222	struct scatterlist	*in_sg;
 223	int		nb_in_sg;
 224	struct scatterlist	*out_sg;
 225	int		nb_out_sg;
 226
 227	u32			error;
 228};
 229
 230static struct sahara_dev *dev_ptr;
 231
 232static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
 233{
 234	writel(data, dev->regs_base + reg);
 235}
 236
 237static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
 238{
 239	return readl(dev->regs_base + reg);
 240}
 241
 242static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
 243{
 244	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
 245			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
 246			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 247
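	/*
	 * The header word must keep odd parity: each mode bit set below
	 * toggles SAHARA_HDR_PARITY_BIT to compensate.
	 */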
 248	if (dev->flags & FLAGS_CBC) {
 249		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
 250		hdr ^= SAHARA_HDR_PARITY_BIT;
 251	}
 252
 253	if (dev->flags & FLAGS_ENCRYPT) {
 254		hdr |= SAHARA_HDR_SKHA_OP_ENC;
 255		hdr ^= SAHARA_HDR_PARITY_BIT;
 256	}
 257
 258	return hdr;
 259}
 260
 261static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
 262{
 263	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
 264			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 265}
 266
 267static const char *sahara_err_src[16] = {
 268	"No error",
 269	"Header error",
 270	"Descriptor length error",
 271	"Descriptor length or pointer error",
 272	"Link length error",
 273	"Link pointer error",
 274	"Input buffer error",
 275	"Output buffer error",
 276	"Output buffer starvation",
 277	"Internal state fault",
 278	"General descriptor problem",
 279	"Reserved",
 280	"Descriptor address error",
 281	"Link address error",
 282	"CHA error",
 283	"DMA error"
 284};
 285
 286static const char *sahara_err_dmasize[4] = {
 287	"Byte transfer",
 288	"Half-word transfer",
 289	"Word transfer",
 290	"Reserved"
 291};
 292
 293static const char *sahara_err_dmasrc[8] = {
 294	"No error",
 295	"AHB bus error",
 296	"Internal IP bus error",
 297	"Parity error",
 298	"DMA crosses 256 byte boundary",
 299	"DMA is busy",
 300	"Reserved",
 301	"DMA HW error"
 302};
 303
 304static const char *sahara_cha_errsrc[12] = {
 305	"Input buffer non-empty",
 306	"Illegal address",
 307	"Illegal mode",
 308	"Illegal data size",
 309	"Illegal key size",
 310	"Write during processing",
 311	"CTX read during processing",
 312	"HW error",
 313	"Input buffer disabled/underflow",
 314	"Output buffer disabled/overflow",
 315	"DES key parity error",
 316	"Reserved"
 317};
 318
 319static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
 320
 321static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
 322{
 323	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
 324	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
 325
 326	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
 327
 328	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
 329
 330	if (source == SAHARA_ERRSOURCE_DMA) {
 331		if (error & SAHARA_ERRSTATUS_DMA_DIR)
 332			dev_err(dev->device, "		* DMA read.\n");
 333		else
 334			dev_err(dev->device, "		* DMA write.\n");
 335
 336		dev_err(dev->device, "		* %s.\n",
 337		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
 338		dev_err(dev->device, "		* %s.\n",
 339		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
 340	} else if (source == SAHARA_ERRSOURCE_CHA) {
 341		dev_err(dev->device, "		* %s.\n",
 342			sahara_cha_errsrc[chasrc]);
 343		dev_err(dev->device, "		* %s.\n",
 344		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
 345	}
 346	dev_err(dev->device, "\n");
 347}
 348
 349static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
 350
 351static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
 352{
 353	u8 state;
 354
 355	if (!__is_defined(DEBUG))
 356		return;
 357
 358	state = SAHARA_STATUS_GET_STATE(status);
 359
 360	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
 361		__func__, status);
 362
 363	dev_dbg(dev->device, "	- State = %d:\n", state);
 364	if (state & SAHARA_STATE_COMP_FLAG)
 365		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
 366
 367	dev_dbg(dev->device, "		* %s.\n",
 368	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
 369
 370	if (status & SAHARA_STATUS_DAR_FULL)
 371		dev_dbg(dev->device, "	- DAR Full.\n");
 372	if (status & SAHARA_STATUS_ERROR)
 373		dev_dbg(dev->device, "	- Error.\n");
 374	if (status & SAHARA_STATUS_SECURE)
 375		dev_dbg(dev->device, "	- Secure.\n");
 376	if (status & SAHARA_STATUS_FAIL)
 377		dev_dbg(dev->device, "	- Fail.\n");
 378	if (status & SAHARA_STATUS_RNG_RESEED)
 379		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
 380	if (status & SAHARA_STATUS_ACTIVE_RNG)
 381		dev_dbg(dev->device, "	- RNG Active.\n");
 382	if (status & SAHARA_STATUS_ACTIVE_MDHA)
 383		dev_dbg(dev->device, "	- MDHA Active.\n");
 384	if (status & SAHARA_STATUS_ACTIVE_SKHA)
 385		dev_dbg(dev->device, "	- SKHA Active.\n");
 386
 387	if (status & SAHARA_STATUS_MODE_BATCH)
 388		dev_dbg(dev->device, "	- Batch Mode.\n");
 389	else if (status & SAHARA_STATUS_MODE_DEDICATED)
 390		dev_dbg(dev->device, "	- Dedicated Mode.\n");
 391	else if (status & SAHARA_STATUS_MODE_DEBUG)
 392		dev_dbg(dev->device, "	- Debug Mode.\n");
 393
 394	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
 395	       SAHARA_STATUS_GET_ISTATE(status));
 396
 397	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
 398		sahara_read(dev, SAHARA_REG_CDAR));
 399	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
 400		sahara_read(dev, SAHARA_REG_IDAR));
 401}
 402
 403static void sahara_dump_descriptors(struct sahara_dev *dev)
 404{
 405	int i;
 406
 407	if (!__is_defined(DEBUG))
 408		return;
 409
 410	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
 411		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
 412			i, &dev->hw_phys_desc[i]);
 413		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
 414		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
 415		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
 416		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
 417		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
 418		dev_dbg(dev->device, "\tnext = 0x%08x\n",
 419			dev->hw_desc[i]->next);
 420	}
 421	dev_dbg(dev->device, "\n");
 422}
 423
 424static void sahara_dump_links(struct sahara_dev *dev)
 425{
 426	int i;
 427
 428	if (!__is_defined(DEBUG))
 429		return;
 430
 431	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
 432		dev_dbg(dev->device, "Link (%d) (%pad):\n",
 433			i, &dev->hw_phys_link[i]);
 434		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
 435		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
 436		dev_dbg(dev->device, "\tnext = 0x%08x\n",
 437			dev->hw_link[i]->next);
 438	}
 439	dev_dbg(dev->device, "\n");
 440}
 441
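/*
 * Build the AES descriptor chain: an optional key/IV descriptor (only when
 * a new key was set) followed by a data descriptor whose p1/p2 point at
 * the input and output link chains.
 */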
 442static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 443{
 444	struct sahara_ctx *ctx = dev->ctx;
 445	struct scatterlist *sg;
 446	int ret;
 447	int i, j;
 448	int idx = 0;
 449
 450	/* Copy new key if necessary */
 451	if (ctx->flags & FLAGS_NEW_KEY) {
 452		memcpy(dev->key_base, ctx->key, ctx->keylen);
 453		ctx->flags &= ~FLAGS_NEW_KEY;
 454
 455		if (dev->flags & FLAGS_CBC) {
 456			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
 457			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
 458		} else {
 459			dev->hw_desc[idx]->len1 = 0;
 460			dev->hw_desc[idx]->p1 = 0;
 461		}
 462		dev->hw_desc[idx]->len2 = ctx->keylen;
 463		dev->hw_desc[idx]->p2 = dev->key_phys_base;
 464		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
 465
 466		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
 467
 468		idx++;
 469	}
 470
 471	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
 472	if (dev->nb_in_sg < 0) {
  473		dev_err(dev->device, "Invalid number of src SG.\n");
 474		return dev->nb_in_sg;
 475	}
 476	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
 477	if (dev->nb_out_sg < 0) {
  478		dev_err(dev->device, "Invalid number of dst SG.\n");
 479		return dev->nb_out_sg;
 480	}
 481	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
 482		dev_err(dev->device, "not enough hw links (%d)\n",
 483			dev->nb_in_sg + dev->nb_out_sg);
 484		return -EINVAL;
 485	}
 486
 487	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 488			 DMA_TO_DEVICE);
 489	if (ret != dev->nb_in_sg) {
 490		dev_err(dev->device, "couldn't map in sg\n");
 491		goto unmap_in;
 492	}
 493	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 494			 DMA_FROM_DEVICE);
 495	if (ret != dev->nb_out_sg) {
 496		dev_err(dev->device, "couldn't map out sg\n");
 497		goto unmap_out;
 498	}
 499
 500	/* Create input links */
 501	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
 502	sg = dev->in_sg;
 503	for (i = 0; i < dev->nb_in_sg; i++) {
 504		dev->hw_link[i]->len = sg->length;
 505		dev->hw_link[i]->p = sg->dma_address;
 506		if (i == (dev->nb_in_sg - 1)) {
 507			dev->hw_link[i]->next = 0;
 508		} else {
 509			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 510			sg = sg_next(sg);
 511		}
 512	}
 513
 514	/* Create output links */
 515	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
 516	sg = dev->out_sg;
 517	for (j = i; j < dev->nb_out_sg + i; j++) {
 518		dev->hw_link[j]->len = sg->length;
 519		dev->hw_link[j]->p = sg->dma_address;
 520		if (j == (dev->nb_out_sg + i - 1)) {
 521			dev->hw_link[j]->next = 0;
 522		} else {
 523			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
 524			sg = sg_next(sg);
 525		}
 526	}
 527
 528	/* Fill remaining fields of hw_desc[1] */
 529	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
 530	dev->hw_desc[idx]->len1 = dev->total;
 531	dev->hw_desc[idx]->len2 = dev->total;
 532	dev->hw_desc[idx]->next = 0;
 533
 534	sahara_dump_descriptors(dev);
 535	sahara_dump_links(dev);
 536
 537	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
 538
 539	return 0;
 540
 541unmap_out:
 542	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 543		DMA_FROM_DEVICE);
 544unmap_in:
 545	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 546		DMA_TO_DEVICE);
 547
 548	return -EINVAL;
 549}
 550
 551static int sahara_aes_process(struct skcipher_request *req)
 552{
 553	struct sahara_dev *dev = dev_ptr;
 554	struct sahara_ctx *ctx;
 555	struct sahara_aes_reqctx *rctx;
 556	int ret;
 557	unsigned long timeout;
 558
 559	/* Request is ready to be dispatched by the device */
 560	dev_dbg(dev->device,
 561		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
 562		req->cryptlen, req->src, req->dst);
 563
 564	/* assign new request to device */
 565	dev->total = req->cryptlen;
 566	dev->in_sg = req->src;
 567	dev->out_sg = req->dst;
 568
 569	rctx = skcipher_request_ctx(req);
 570	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 571	rctx->mode &= FLAGS_MODE_MASK;
 572	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 573
 574	if ((dev->flags & FLAGS_CBC) && req->iv)
 575		memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
 576
 577	/* assign new context to device */
 578	dev->ctx = ctx;
 579
 580	reinit_completion(&dev->dma_completion);
 581
 582	ret = sahara_hw_descriptor_create(dev);
 583	if (ret)
 584		return -EINVAL;
 585
 586	timeout = wait_for_completion_timeout(&dev->dma_completion,
 587				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
 588	if (!timeout) {
 589		dev_err(dev->device, "AES timeout\n");
 590		return -ETIMEDOUT;
 591	}
 592
 593	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 594		DMA_FROM_DEVICE);
 595	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 596		DMA_TO_DEVICE);
 597
 598	return 0;
 599}
 600
 601static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 602			     unsigned int keylen)
 603{
 604	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
 605
 606	ctx->keylen = keylen;
 607
  608	/* SAHARA only supports 128-bit keys */
 609	if (keylen == AES_KEYSIZE_128) {
 610		memcpy(ctx->key, key, keylen);
 611		ctx->flags |= FLAGS_NEW_KEY;
 612		return 0;
 613	}
 614
 615	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
 616		return -EINVAL;
 617
 618	/*
 619	 * The requested key size is not supported by HW, do a fallback.
 620	 */
 621	crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
 622	crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
 623						 CRYPTO_TFM_REQ_MASK);
 624	return crypto_skcipher_setkey(ctx->fallback, key, keylen);
 625}
 626
 627static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
 628{
 629	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 630	struct sahara_dev *dev = dev_ptr;
 631	int err = 0;
 632
 633	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
 634		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
 635
 636	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
 637		dev_err(dev->device,
  638			"request size is not an exact number of AES blocks\n");
 639		return -EINVAL;
 640	}
 641
 642	rctx->mode = mode;
 643
 644	mutex_lock(&dev->queue_mutex);
 645	err = crypto_enqueue_request(&dev->queue, &req->base);
 646	mutex_unlock(&dev->queue_mutex);
 647
 648	wake_up_process(dev->kthread);
 649
 650	return err;
 651}
 652
 653static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
 654{
 655	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 656	struct sahara_ctx *ctx = crypto_skcipher_ctx(
 657		crypto_skcipher_reqtfm(req));
 658
 659	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 660		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 661		skcipher_request_set_callback(&rctx->fallback_req,
 662					      req->base.flags,
 663					      req->base.complete,
 664					      req->base.data);
 665		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
 666					   req->dst, req->cryptlen, req->iv);
 667		return crypto_skcipher_encrypt(&rctx->fallback_req);
 668	}
 669
 670	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
 671}
 672
 673static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
 674{
 675	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 676	struct sahara_ctx *ctx = crypto_skcipher_ctx(
 677		crypto_skcipher_reqtfm(req));
 678
 679	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 680		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 681		skcipher_request_set_callback(&rctx->fallback_req,
 682					      req->base.flags,
 683					      req->base.complete,
 684					      req->base.data);
 685		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
 686					   req->dst, req->cryptlen, req->iv);
 687		return crypto_skcipher_decrypt(&rctx->fallback_req);
 688	}
 689
 690	return sahara_aes_crypt(req, 0);
 691}
 692
 693static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
 694{
 695	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 696	struct sahara_ctx *ctx = crypto_skcipher_ctx(
 697		crypto_skcipher_reqtfm(req));
 698
 699	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 700		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 701		skcipher_request_set_callback(&rctx->fallback_req,
 702					      req->base.flags,
 703					      req->base.complete,
 704					      req->base.data);
 705		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
 706					   req->dst, req->cryptlen, req->iv);
 707		return crypto_skcipher_encrypt(&rctx->fallback_req);
 708	}
 709
 710	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
 711}
 712
 713static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
 714{
 715	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
 716	struct sahara_ctx *ctx = crypto_skcipher_ctx(
 717		crypto_skcipher_reqtfm(req));
 718
 719	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 720		skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 721		skcipher_request_set_callback(&rctx->fallback_req,
 722					      req->base.flags,
 723					      req->base.complete,
 724					      req->base.data);
 725		skcipher_request_set_crypt(&rctx->fallback_req, req->src,
 726					   req->dst, req->cryptlen, req->iv);
 727		return crypto_skcipher_decrypt(&rctx->fallback_req);
 728	}
 729
 730	return sahara_aes_crypt(req, FLAGS_CBC);
 731}
 732
 733static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
 734{
 735	const char *name = crypto_tfm_alg_name(&tfm->base);
 736	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
 737
 738	ctx->fallback = crypto_alloc_skcipher(name, 0,
 739					      CRYPTO_ALG_NEED_FALLBACK);
 740	if (IS_ERR(ctx->fallback)) {
 741		pr_err("Error allocating fallback algo %s\n", name);
 742		return PTR_ERR(ctx->fallback);
 743	}
 744
 745	crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
 746					 crypto_skcipher_reqsize(ctx->fallback));
 747
 748	return 0;
 749}
 750
 751static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
 752{
 753	struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
 754
 755	crypto_free_skcipher(ctx->fallback);
 756}
 757
 758static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
 759			      struct sahara_sha_reqctx *rctx)
 760{
 761	u32 hdr = 0;
 762
 763	hdr = rctx->mode;
 764
 765	if (rctx->first) {
 766		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
 767		hdr |= SAHARA_HDR_MDHA_INIT;
 768	} else {
 769		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
 770	}
 771
 772	if (rctx->last)
 773		hdr |= SAHARA_HDR_MDHA_PDATA;
 774
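	/* bit 31 is a parity bit: force the header word to odd parity */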
 775	if (hweight_long(hdr) % 2 == 0)
 776		hdr |= SAHARA_HDR_PARITY_BIT;
 777
 778	return hdr;
 779}
 780
 781static int sahara_sha_hw_links_create(struct sahara_dev *dev,
 782				       struct sahara_sha_reqctx *rctx,
 783				       int start)
 784{
 785	struct scatterlist *sg;
 786	unsigned int i;
 787	int ret;
 788
 789	dev->in_sg = rctx->in_sg;
 790
 791	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
 792	if (dev->nb_in_sg < 0) {
  793		dev_err(dev->device, "Invalid number of src SG.\n");
 794		return dev->nb_in_sg;
 795	}
  796	if (dev->nb_in_sg > SAHARA_MAX_HW_LINK) {
  797		dev_err(dev->device, "not enough hw links (%d)\n",
  798			dev->nb_in_sg);
 799		return -EINVAL;
 800	}
 801
 802	sg = dev->in_sg;
 803	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
 804	if (!ret)
 805		return -EFAULT;
 806
 807	for (i = start; i < dev->nb_in_sg + start; i++) {
 808		dev->hw_link[i]->len = sg->length;
 809		dev->hw_link[i]->p = sg->dma_address;
 810		if (i == (dev->nb_in_sg + start - 1)) {
 811			dev->hw_link[i]->next = 0;
 812		} else {
 813			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 814			sg = sg_next(sg);
 815		}
 816	}
 817
 818	return i;
 819}
 820
 821static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
 822						struct sahara_sha_reqctx *rctx,
 823						struct ahash_request *req,
 824						int index)
 825{
 826	unsigned result_len;
 827	int i = index;
 828
 829	if (rctx->first)
  830		/* Create initial descriptor: #8 */
 831		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
 832	else
 833		/* Create hash descriptor: #10. Must follow #6. */
 834		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
 835
 836	dev->hw_desc[index]->len1 = rctx->total;
 837	if (dev->hw_desc[index]->len1 == 0) {
 838		/* if len1 is 0, p1 must be 0, too */
 839		dev->hw_desc[index]->p1 = 0;
 840		rctx->sg_in_idx = 0;
 841	} else {
 842		/* Create input links */
 843		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
 844		i = sahara_sha_hw_links_create(dev, rctx, index);
 845
 846		rctx->sg_in_idx = index;
 847		if (i < 0)
 848			return i;
 849	}
 850
 851	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
 852
 853	/* Save the context for the next operation */
 854	result_len = rctx->context_size;
 855	dev->hw_link[i]->p = dev->context_phys_base;
 856
 857	dev->hw_link[i]->len = result_len;
 858	dev->hw_desc[index]->len2 = result_len;
 859
 860	dev->hw_link[i]->next = 0;
 861
 862	return 0;
 863}
 864
 865/*
 866 * Load descriptor aka #6
 867 *
 868 * To load a previously saved context back to the MDHA unit
 869 *
 870 * p1: Saved Context
 871 * p2: NULL
 872 *
 873 */
 874static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
 875						struct sahara_sha_reqctx *rctx,
 876						struct ahash_request *req,
 877						int index)
 878{
 879	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
 880
 881	dev->hw_desc[index]->len1 = rctx->context_size;
 882	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
 883	dev->hw_desc[index]->len2 = 0;
 884	dev->hw_desc[index]->p2 = 0;
 885
 886	dev->hw_link[index]->len = rctx->context_size;
 887	dev->hw_link[index]->p = dev->context_phys_base;
 888	dev->hw_link[index]->next = 0;
 889
 890	return 0;
 891}
 892
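/*
 * Trim the scatterlist to nbytes: shorten the entry holding the final
 * byte, mark it as the list end and return any bytes that did not fit.
 */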
 893static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
 894{
 895	if (!sg || !sg->length)
 896		return nbytes;
 897
 898	while (nbytes && sg) {
 899		if (nbytes <= sg->length) {
 900			sg->length = nbytes;
 901			sg_mark_end(sg);
 902			break;
 903		}
 904		nbytes -= sg->length;
 905		sg = sg_next(sg);
 906	}
 907
 908	return nbytes;
 909}
 910
 911static int sahara_sha_prepare_request(struct ahash_request *req)
 912{
 913	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 914	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
 915	unsigned int hash_later;
 916	unsigned int block_size;
 917	unsigned int len;
 918
 919	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
 920
 921	/* append bytes from previous operation */
 922	len = rctx->buf_cnt + req->nbytes;
 923
 924	/* only the last transfer can be padded in hardware */
 925	if (!rctx->last && (len < block_size)) {
  926		/* too little data, save it for the next operation */
 927		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
 928					 0, req->nbytes, 0);
 929		rctx->buf_cnt += req->nbytes;
 930
 931		return 0;
 932	}
 933
 934	/* add data from previous operation first */
 935	if (rctx->buf_cnt)
 936		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
 937
 938	/* data must always be a multiple of block_size */
 939	hash_later = rctx->last ? 0 : len & (block_size - 1);
 940	if (hash_later) {
 941		unsigned int offset = req->nbytes - hash_later;
 942		/* Save remaining bytes for later use */
 943		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
 944					hash_later, 0);
 945	}
 946
  947	/* nbytes should now be a multiple of the block size */
 948	req->nbytes = req->nbytes - hash_later;
 949
 950	sahara_walk_and_recalc(req->src, req->nbytes);
 951
 952	/* have data from previous operation and current */
 953	if (rctx->buf_cnt && req->nbytes) {
 954		sg_init_table(rctx->in_sg_chain, 2);
 955		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
 956
 957		sg_chain(rctx->in_sg_chain, 2, req->src);
 958
 959		rctx->total = req->nbytes + rctx->buf_cnt;
 960		rctx->in_sg = rctx->in_sg_chain;
 961
 962		req->src = rctx->in_sg_chain;
 963	/* only data from previous operation */
 964	} else if (rctx->buf_cnt) {
 965		if (req->src)
 966			rctx->in_sg = req->src;
 967		else
 968			rctx->in_sg = rctx->in_sg_chain;
 969		/* buf was copied into rembuf above */
 970		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
 971		rctx->total = rctx->buf_cnt;
 972	/* no data from previous operation */
 973	} else {
 974		rctx->in_sg = req->src;
 975		rctx->total = req->nbytes;
 976		req->src = rctx->in_sg;
 977	}
 978
 979	/* on next call, we only have the remaining data in the buffer */
 980	rctx->buf_cnt = hash_later;
 981
 982	return -EINPROGRESS;
 983}
 984
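/*
 * Process one hash operation. The first operation needs only a data
 * descriptor; later ones chain a context-load descriptor (#6) in front
 * of the hash descriptor (#10) to restore the saved MDHA state.
 */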
 985static int sahara_sha_process(struct ahash_request *req)
 986{
 987	struct sahara_dev *dev = dev_ptr;
 988	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
 989	int ret;
 990	unsigned long timeout;
 991
 992	ret = sahara_sha_prepare_request(req);
 993	if (!ret)
 994		return ret;
 995
 996	if (rctx->first) {
 997		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
 998		dev->hw_desc[0]->next = 0;
 999		rctx->first = 0;
1000	} else {
1001		memcpy(dev->context_base, rctx->context, rctx->context_size);
1002
1003		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1004		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1005		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1006		dev->hw_desc[1]->next = 0;
1007	}
1008
1009	sahara_dump_descriptors(dev);
1010	sahara_dump_links(dev);
1011
1012	reinit_completion(&dev->dma_completion);
1013
1014	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1015
1016	timeout = wait_for_completion_timeout(&dev->dma_completion,
1017				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1018	if (!timeout) {
1019		dev_err(dev->device, "SHA timeout\n");
1020		return -ETIMEDOUT;
1021	}
1022
1023	if (rctx->sg_in_idx)
1024		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1025			     DMA_TO_DEVICE);
1026
1027	memcpy(rctx->context, dev->context_base, rctx->context_size);
1028
1029	if (req->result)
1030		memcpy(req->result, rctx->context, rctx->digest_size);
1031
1032	return 0;
1033}
1034
1035static int sahara_queue_manage(void *data)
1036{
1037	struct sahara_dev *dev = (struct sahara_dev *)data;
1038	struct crypto_async_request *async_req;
1039	struct crypto_async_request *backlog;
1040	int ret = 0;
1041
1042	do {
1043		__set_current_state(TASK_INTERRUPTIBLE);
1044
1045		mutex_lock(&dev->queue_mutex);
1046		backlog = crypto_get_backlog(&dev->queue);
1047		async_req = crypto_dequeue_request(&dev->queue);
1048		mutex_unlock(&dev->queue_mutex);
1049
1050		if (backlog)
1051			backlog->complete(backlog, -EINPROGRESS);
1052
1053		if (async_req) {
1054			if (crypto_tfm_alg_type(async_req->tfm) ==
1055			    CRYPTO_ALG_TYPE_AHASH) {
1056				struct ahash_request *req =
1057					ahash_request_cast(async_req);
1058
1059				ret = sahara_sha_process(req);
1060			} else {
1061				struct skcipher_request *req =
1062					skcipher_request_cast(async_req);
1063
1064				ret = sahara_aes_process(req);
1065			}
1066
1067			async_req->complete(async_req, ret);
1068
1069			continue;
1070		}
1071
1072		schedule();
1073	} while (!kthread_should_stop());
1074
1075	return 0;
1076}
1077
1078static int sahara_sha_enqueue(struct ahash_request *req, int last)
1079{
1080	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1081	struct sahara_dev *dev = dev_ptr;
1082	int ret;
1083
1084	if (!req->nbytes && !last)
1085		return 0;
1086
1087	rctx->last = last;
1088
1089	if (!rctx->active) {
1090		rctx->active = 1;
1091		rctx->first = 1;
1092	}
1093
1094	mutex_lock(&dev->queue_mutex);
1095	ret = crypto_enqueue_request(&dev->queue, &req->base);
1096	mutex_unlock(&dev->queue_mutex);
1097
1098	wake_up_process(dev->kthread);
1099
1100	return ret;
1101}
1102
1103static int sahara_sha_init(struct ahash_request *req)
1104{
1105	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1106	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1107
1108	memset(rctx, 0, sizeof(*rctx));
1109
1110	switch (crypto_ahash_digestsize(tfm)) {
1111	case SHA1_DIGEST_SIZE:
1112		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1113		rctx->digest_size = SHA1_DIGEST_SIZE;
1114		break;
1115	case SHA256_DIGEST_SIZE:
1116		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1117		rctx->digest_size = SHA256_DIGEST_SIZE;
1118		break;
1119	default:
1120		return -EINVAL;
1121	}
1122
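	/* hw context holds the digest plus a 4-byte message-length field */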
1123	rctx->context_size = rctx->digest_size + 4;
1124	rctx->active = 0;
1125
1126	return 0;
1127}
1128
1129static int sahara_sha_update(struct ahash_request *req)
1130{
1131	return sahara_sha_enqueue(req, 0);
1132}
1133
1134static int sahara_sha_final(struct ahash_request *req)
1135{
1136	req->nbytes = 0;
1137	return sahara_sha_enqueue(req, 1);
1138}
1139
1140static int sahara_sha_finup(struct ahash_request *req)
1141{
1142	return sahara_sha_enqueue(req, 1);
1143}
1144
1145static int sahara_sha_digest(struct ahash_request *req)
1146{
1147	sahara_sha_init(req);
1148
1149	return sahara_sha_finup(req);
1150}
1151
1152static int sahara_sha_export(struct ahash_request *req, void *out)
1153{
1154	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1155
1156	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
1157
1158	return 0;
1159}
1160
1161static int sahara_sha_import(struct ahash_request *req, const void *in)
1162{
1163	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1164
1165	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
1166
1167	return 0;
1168}
1169
1170static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1171{
1172	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1173				 sizeof(struct sahara_sha_reqctx) +
1174				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1175
1176	return 0;
1177}
1178
1179static struct skcipher_alg aes_algs[] = {
1180{
1181	.base.cra_name		= "ecb(aes)",
1182	.base.cra_driver_name	= "sahara-ecb-aes",
1183	.base.cra_priority	= 300,
1184	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1185	.base.cra_blocksize	= AES_BLOCK_SIZE,
1186	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1187	.base.cra_alignmask	= 0x0,
1188	.base.cra_module	= THIS_MODULE,
1189
1190	.init			= sahara_aes_init_tfm,
1191	.exit			= sahara_aes_exit_tfm,
 1192	.min_keysize		= AES_MIN_KEY_SIZE,
1193	.max_keysize		= AES_MAX_KEY_SIZE,
1194	.setkey			= sahara_aes_setkey,
1195	.encrypt		= sahara_aes_ecb_encrypt,
1196	.decrypt		= sahara_aes_ecb_decrypt,
1197}, {
1198	.base.cra_name		= "cbc(aes)",
1199	.base.cra_driver_name	= "sahara-cbc-aes",
1200	.base.cra_priority	= 300,
1201	.base.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1202	.base.cra_blocksize	= AES_BLOCK_SIZE,
1203	.base.cra_ctxsize	= sizeof(struct sahara_ctx),
1204	.base.cra_alignmask	= 0x0,
1205	.base.cra_module	= THIS_MODULE,
1206
1207	.init			= sahara_aes_init_tfm,
1208	.exit			= sahara_aes_exit_tfm,
 1209	.min_keysize		= AES_MIN_KEY_SIZE,
1210	.max_keysize		= AES_MAX_KEY_SIZE,
1211	.ivsize			= AES_BLOCK_SIZE,
1212	.setkey			= sahara_aes_setkey,
1213	.encrypt		= sahara_aes_cbc_encrypt,
1214	.decrypt		= sahara_aes_cbc_decrypt,
1215}
1216};
1217
1218static struct ahash_alg sha_v3_algs[] = {
1219{
1220	.init		= sahara_sha_init,
1221	.update		= sahara_sha_update,
1222	.final		= sahara_sha_final,
1223	.finup		= sahara_sha_finup,
1224	.digest		= sahara_sha_digest,
1225	.export		= sahara_sha_export,
1226	.import		= sahara_sha_import,
1227	.halg.digestsize	= SHA1_DIGEST_SIZE,
1228	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1229	.halg.base	= {
1230		.cra_name		= "sha1",
1231		.cra_driver_name	= "sahara-sha1",
1232		.cra_priority		= 300,
1233		.cra_flags		= CRYPTO_ALG_ASYNC |
1234						CRYPTO_ALG_NEED_FALLBACK,
1235		.cra_blocksize		= SHA1_BLOCK_SIZE,
1236		.cra_ctxsize		= sizeof(struct sahara_ctx),
1237		.cra_alignmask		= 0,
1238		.cra_module		= THIS_MODULE,
1239		.cra_init		= sahara_sha_cra_init,
1240	}
1241},
1242};
1243
1244static struct ahash_alg sha_v4_algs[] = {
1245{
1246	.init		= sahara_sha_init,
1247	.update		= sahara_sha_update,
1248	.final		= sahara_sha_final,
1249	.finup		= sahara_sha_finup,
1250	.digest		= sahara_sha_digest,
1251	.export		= sahara_sha_export,
1252	.import		= sahara_sha_import,
1253	.halg.digestsize	= SHA256_DIGEST_SIZE,
1254	.halg.statesize         = sizeof(struct sahara_sha_reqctx),
1255	.halg.base	= {
1256		.cra_name		= "sha256",
1257		.cra_driver_name	= "sahara-sha256",
1258		.cra_priority		= 300,
1259		.cra_flags		= CRYPTO_ALG_ASYNC |
1260						CRYPTO_ALG_NEED_FALLBACK,
1261		.cra_blocksize		= SHA256_BLOCK_SIZE,
1262		.cra_ctxsize		= sizeof(struct sahara_ctx),
1263		.cra_alignmask		= 0,
1264		.cra_module		= THIS_MODULE,
1265		.cra_init		= sahara_sha_cra_init,
1266	}
1267},
1268};
1269
1270static irqreturn_t sahara_irq_handler(int irq, void *data)
1271{
1272	struct sahara_dev *dev = (struct sahara_dev *)data;
1273	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1274	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1275
1276	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1277		     SAHARA_REG_CMD);
1278
1279	sahara_decode_status(dev, stat);
1280
1281	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1282		return IRQ_NONE;
1283	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1284		dev->error = 0;
1285	} else {
1286		sahara_decode_error(dev, err);
1287		dev->error = -EINVAL;
1288	}
1289
1290	complete(&dev->dma_completion);
1291
1292	return IRQ_HANDLED;
1293}
1294
1295
1296static int sahara_register_algs(struct sahara_dev *dev)
1297{
1298	int err;
1299	unsigned int i, j, k, l;
1300
1301	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1302		err = crypto_register_skcipher(&aes_algs[i]);
1303		if (err)
1304			goto err_aes_algs;
1305	}
1306
1307	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1308		err = crypto_register_ahash(&sha_v3_algs[k]);
1309		if (err)
1310			goto err_sha_v3_algs;
1311	}
1312
1313	if (dev->version > SAHARA_VERSION_3)
1314		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1315			err = crypto_register_ahash(&sha_v4_algs[l]);
1316			if (err)
1317				goto err_sha_v4_algs;
1318		}
1319
1320	return 0;
1321
1322err_sha_v4_algs:
1323	for (j = 0; j < l; j++)
1324		crypto_unregister_ahash(&sha_v4_algs[j]);
1325
1326err_sha_v3_algs:
1327	for (j = 0; j < k; j++)
1328		crypto_unregister_ahash(&sha_v3_algs[j]);
1329
1330err_aes_algs:
1331	for (j = 0; j < i; j++)
1332		crypto_unregister_skcipher(&aes_algs[j]);
1333
1334	return err;
1335}
1336
1337static void sahara_unregister_algs(struct sahara_dev *dev)
1338{
1339	unsigned int i;
1340
1341	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1342		crypto_unregister_skcipher(&aes_algs[i]);
1343
1344	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1345		crypto_unregister_ahash(&sha_v3_algs[i]);
1346
1347	if (dev->version > SAHARA_VERSION_3)
1348		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1349			crypto_unregister_ahash(&sha_v4_algs[i]);
1350}
1351
1352static const struct platform_device_id sahara_platform_ids[] = {
1353	{ .name = "sahara-imx27" },
1354	{ /* sentinel */ }
1355};
1356MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
1357
1358static const struct of_device_id sahara_dt_ids[] = {
1359	{ .compatible = "fsl,imx53-sahara" },
1360	{ .compatible = "fsl,imx27-sahara" },
1361	{ /* sentinel */ }
1362};
1363MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1364
1365static int sahara_probe(struct platform_device *pdev)
1366{
1367	struct sahara_dev *dev;
1368	u32 version;
1369	int irq;
1370	int err;
1371	int i;
1372
1373	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1374	if (!dev)
1375		return -ENOMEM;
1376
1377	dev->device = &pdev->dev;
1378	platform_set_drvdata(pdev, dev);
1379
1380	/* Get the base address */
1381	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
1382	if (IS_ERR(dev->regs_base))
1383		return PTR_ERR(dev->regs_base);
1384
1385	/* Get the IRQ */
 1386	irq = platform_get_irq(pdev, 0);
1387	if (irq < 0)
1388		return irq;
1389
1390	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1391			       0, dev_name(&pdev->dev), dev);
1392	if (err) {
1393		dev_err(&pdev->dev, "failed to request irq\n");
1394		return err;
1395	}
1396
1397	/* clocks */
1398	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1399	if (IS_ERR(dev->clk_ipg)) {
1400		dev_err(&pdev->dev, "Could not get ipg clock\n");
1401		return PTR_ERR(dev->clk_ipg);
1402	}
1403
1404	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1405	if (IS_ERR(dev->clk_ahb)) {
1406		dev_err(&pdev->dev, "Could not get ahb clock\n");
1407		return PTR_ERR(dev->clk_ahb);
1408	}
1409
1410	/* Allocate HW descriptors */
1411	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1412			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1413			&dev->hw_phys_desc[0], GFP_KERNEL);
1414	if (!dev->hw_desc[0]) {
1415		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1416		return -ENOMEM;
1417	}
1418	dev->hw_desc[1] = dev->hw_desc[0] + 1;
1419	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1420				sizeof(struct sahara_hw_desc);
1421
1422	/* Allocate space for iv and key */
1423	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1424				&dev->key_phys_base, GFP_KERNEL);
1425	if (!dev->key_base) {
1426		dev_err(&pdev->dev, "Could not allocate memory for key\n");
1427		return -ENOMEM;
1428	}
1429	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1430	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1431
1432	/* Allocate space for context: largest digest + message length field */
1433	dev->context_base = dmam_alloc_coherent(&pdev->dev,
1434					SHA256_DIGEST_SIZE + 4,
1435					&dev->context_phys_base, GFP_KERNEL);
1436	if (!dev->context_base) {
1437		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1438		return -ENOMEM;
1439	}
1440
1441	/* Allocate space for HW links */
1442	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1443			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1444			&dev->hw_phys_link[0], GFP_KERNEL);
1445	if (!dev->hw_link[0]) {
1446		dev_err(&pdev->dev, "Could not allocate hw links\n");
1447		return -ENOMEM;
1448	}
1449	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1450		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1451					sizeof(struct sahara_hw_link);
1452		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1453	}
1454
1455	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1456
1457	mutex_init(&dev->queue_mutex);
1458
1459	dev_ptr = dev;
1460
1461	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1462	if (IS_ERR(dev->kthread)) {
1463		return PTR_ERR(dev->kthread);
1464	}
1465
1466	init_completion(&dev->dma_completion);
1467
1468	err = clk_prepare_enable(dev->clk_ipg);
1469	if (err)
1470		return err;
1471	err = clk_prepare_enable(dev->clk_ahb);
1472	if (err)
1473		goto clk_ipg_disable;
1474
1475	version = sahara_read(dev, SAHARA_REG_VERSION);
1476	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1477		if (version != SAHARA_VERSION_3)
1478			err = -ENODEV;
1479	} else if (of_device_is_compatible(pdev->dev.of_node,
1480			"fsl,imx53-sahara")) {
1481		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1482			err = -ENODEV;
1483		version = (version >> 8) & 0xff;
1484	}
1485	if (err == -ENODEV) {
1486		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1487				version);
1488		goto err_algs;
1489	}
1490
1491	dev->version = version;
1492
1493	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1494		     SAHARA_REG_CMD);
1495	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1496			SAHARA_CONTROL_SET_MAXBURST(8) |
1497			SAHARA_CONTROL_RNG_AUTORSD |
1498			SAHARA_CONTROL_ENABLE_INT,
1499			SAHARA_REG_CONTROL);
1500
1501	err = sahara_register_algs(dev);
1502	if (err)
1503		goto err_algs;
1504
1505	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1506
1507	return 0;
1508
1509err_algs:
1510	kthread_stop(dev->kthread);
1511	dev_ptr = NULL;
1512	clk_disable_unprepare(dev->clk_ahb);
1513clk_ipg_disable:
1514	clk_disable_unprepare(dev->clk_ipg);
1515
1516	return err;
1517}
1518
1519static int sahara_remove(struct platform_device *pdev)
1520{
1521	struct sahara_dev *dev = platform_get_drvdata(pdev);
1522
1523	kthread_stop(dev->kthread);
1524
1525	sahara_unregister_algs(dev);
1526
1527	clk_disable_unprepare(dev->clk_ipg);
1528	clk_disable_unprepare(dev->clk_ahb);
1529
1530	dev_ptr = NULL;
1531
1532	return 0;
1533}
1534
1535static struct platform_driver sahara_driver = {
1536	.probe		= sahara_probe,
1537	.remove		= sahara_remove,
1538	.driver		= {
1539		.name	= SAHARA_NAME,
1540		.of_match_table = sahara_dt_ids,
1541	},
1542	.id_table = sahara_platform_ids,
1543};
1544
1545module_platform_driver(sahara_driver);
1546
1547MODULE_LICENSE("GPL");
1548MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1549MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
1550MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");
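
/*
 * Usage sketch (editor's addition, not part of the upstream file): the
 * driver registers "sahara-ecb-aes", "sahara-cbc-aes", "sahara-sha1" and
 * "sahara-sha256" with the crypto core, so consumers never call it
 * directly; they request the generic algorithm name. A minimal in-kernel
 * consumer might look like the following. The function name is
 * illustrative, the zero IV is for demonstration only, and error
 * handling is abbreviated.
 */

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Encrypt len bytes of buf in place with cbc(aes); the crypto core picks
 * the SAHARA implementation once this driver has probed. */
static int sahara_example_encrypt(u8 *buf, unsigned int len,
				  const u8 key[AES_KEYSIZE_128])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 iv[AES_BLOCK_SIZE] = { 0 };	/* never reuse a fixed IV in real code */
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	/* len must be a multiple of AES_BLOCK_SIZE, as the driver enforces */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* requests complete asynchronously; wait on the completion */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}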
v3.15
 
   1/*
   2 * Cryptographic API.
   3 *
   4 * Support for SAHARA cryptographic accelerator.
   5 *
 
   6 * Copyright (c) 2013 Vista Silicon S.L.
   7 * Author: Javier Martin <javier.martin@vista-silicon.com>
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as published
  11 * by the Free Software Foundation.
  12 *
  13 * Based on omap-aes.c and tegra-aes.c
  14 */
  15
  16#include <crypto/algapi.h>
  17#include <crypto/aes.h>
 
 
 
 
  18
  19#include <linux/clk.h>
  20#include <linux/crypto.h>
  21#include <linux/interrupt.h>
  22#include <linux/io.h>
  23#include <linux/irq.h>
  24#include <linux/kernel.h>
 
  25#include <linux/module.h>
 
  26#include <linux/of.h>
 
  27#include <linux/platform_device.h>
  28
 
 
 
  29#define SAHARA_NAME "sahara"
  30#define SAHARA_VERSION_3	3
 
  31#define SAHARA_TIMEOUT_MS	1000
  32#define SAHARA_MAX_HW_DESC	2
  33#define SAHARA_MAX_HW_LINK	20
  34
  35#define FLAGS_MODE_MASK		0x000f
  36#define FLAGS_ENCRYPT		BIT(0)
  37#define FLAGS_CBC		BIT(1)
  38#define FLAGS_NEW_KEY		BIT(3)
  39#define FLAGS_BUSY		4
  40
  41#define SAHARA_HDR_BASE			0x00800000
  42#define SAHARA_HDR_SKHA_ALG_AES	0
  43#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
  44#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
  45#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
  46#define SAHARA_HDR_FORM_DATA		(5 << 16)
  47#define SAHARA_HDR_FORM_KEY		(8 << 16)
  48#define SAHARA_HDR_LLO			(1 << 24)
  49#define SAHARA_HDR_CHA_SKHA		(1 << 28)
  50#define SAHARA_HDR_CHA_MDHA		(2 << 28)
  51#define SAHARA_HDR_PARITY_BIT		(1 << 31)
  52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  53/* SAHARA can only process one request at a time */
  54#define SAHARA_QUEUE_LENGTH	1
  55
  56#define SAHARA_REG_VERSION	0x00
  57#define SAHARA_REG_DAR		0x04
  58#define SAHARA_REG_CONTROL	0x08
  59#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
  60#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
  61#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
  62#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
  63#define SAHARA_REG_CMD		0x0C
  64#define		SAHARA_CMD_RESET		(1 << 0)
  65#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
  66#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
  67#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
  68#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
  69#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
  70#define	SAHARA_REG_STATUS	0x10
  71#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
  72#define			SAHARA_STATE_IDLE	0
  73#define			SAHARA_STATE_BUSY	1
  74#define			SAHARA_STATE_ERR	2
  75#define			SAHARA_STATE_FAULT	3
  76#define			SAHARA_STATE_COMPLETE	4
  77#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
  78#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
  79#define		SAHARA_STATUS_ERROR		(1 << 4)
  80#define		SAHARA_STATUS_SECURE		(1 << 5)
  81#define		SAHARA_STATUS_FAIL		(1 << 6)
  82#define		SAHARA_STATUS_INIT		(1 << 7)
  83#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
  84#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
  85#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
  86#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
  87#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
  88#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
  89#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
  90#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
  91#define SAHARA_REG_ERRSTATUS	0x14
  92#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
  93#define			SAHARA_ERRSOURCE_CHA	14
  94#define			SAHARA_ERRSOURCE_DMA	15
  95#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
  96#define		SAHARA_ERRSTATUS_GET_DMASZ(x)(((x) >> 9) & 0x3)
  97#define		SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
  98#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
  99#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
 100#define SAHARA_REG_FADDR	0x18
 101#define SAHARA_REG_CDAR		0x1C
 102#define SAHARA_REG_IDAR		0x20
 103
 104struct sahara_hw_desc {
 105	u32		hdr;
 106	u32		len1;
 107	dma_addr_t	p1;
 108	u32		len2;
 109	dma_addr_t	p2;
 110	dma_addr_t	next;
 111};
 112
 113struct sahara_hw_link {
 114	u32		len;
 115	dma_addr_t	p;
 116	dma_addr_t	next;
 117};
 118
 119struct sahara_ctx {
 120	struct sahara_dev *dev;
 121	unsigned long flags;
 
 
 122	int keylen;
 123	u8 key[AES_KEYSIZE_128];
 124	struct crypto_ablkcipher *fallback;
 125};
 126
 127struct sahara_aes_reqctx {
 128	unsigned long mode;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 129};
 130
 131struct sahara_dev {
 132	struct device		*device;
 
 133	void __iomem		*regs_base;
 134	struct clk		*clk_ipg;
 135	struct clk		*clk_ahb;
 
 
 
 136
 137	struct sahara_ctx	*ctx;
 138	spinlock_t		lock;
 139	struct crypto_queue	queue;
 140	unsigned long		flags;
 141
 142	struct tasklet_struct	done_task;
 143	struct tasklet_struct	queue_task;
 144
 145	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
 146	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];
 147
 148	u8			*key_base;
 149	dma_addr_t		key_phys_base;
 150
 151	u8			*iv_base;
 152	dma_addr_t		iv_phys_base;
 153
 
 
 
 154	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
 155	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];
 156
 157	struct ablkcipher_request *req;
 158	size_t			total;
 159	struct scatterlist	*in_sg;
 160	unsigned int		nb_in_sg;
 161	struct scatterlist	*out_sg;
 162	unsigned int		nb_out_sg;
 163
 164	u32			error;
 165	struct timer_list	watchdog;
 166};
 167
 168static struct sahara_dev *dev_ptr;
 169
 170static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
 171{
 172	writel(data, dev->regs_base + reg);
 173}
 174
 175static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
 176{
 177	return readl(dev->regs_base + reg);
 178}
 179
 180static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
 181{
 182	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
 183			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
 184			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 185
 186	if (dev->flags & FLAGS_CBC) {
 187		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
 188		hdr ^= SAHARA_HDR_PARITY_BIT;
 189	}
 190
 191	if (dev->flags & FLAGS_ENCRYPT) {
 192		hdr |= SAHARA_HDR_SKHA_OP_ENC;
 193		hdr ^= SAHARA_HDR_PARITY_BIT;
 194	}
 195
 196	return hdr;
 197}
 198
 199static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
 200{
 201	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
 202			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
 203}
 204
 205static int sahara_sg_length(struct scatterlist *sg,
 206			    unsigned int total)
 207{
 208	int sg_nb;
 209	unsigned int len;
 210	struct scatterlist *sg_list;
 211
 212	sg_nb = 0;
 213	sg_list = sg;
 214
 215	while (total) {
 216		len = min(sg_list->length, total);
 217
 218		sg_nb++;
 219		total -= len;
 220
 221		sg_list = sg_next(sg_list);
 222		if (!sg_list)
 223			total = 0;
 224	}
 225
 226	return sg_nb;
 227}
 228
 229static char *sahara_err_src[16] = {
 230	"No error",
 231	"Header error",
 232	"Descriptor length error",
 233	"Descriptor length or pointer error",
 234	"Link length error",
 235	"Link pointer error",
 236	"Input buffer error",
 237	"Output buffer error",
 238	"Output buffer starvation",
 239	"Internal state fault",
 240	"General descriptor problem",
 241	"Reserved",
 242	"Descriptor address error",
 243	"Link address error",
 244	"CHA error",
 245	"DMA error"
 246};
 247
 248static char *sahara_err_dmasize[4] = {
 249	"Byte transfer",
 250	"Half-word transfer",
 251	"Word transfer",
 252	"Reserved"
 253};
 254
 255static char *sahara_err_dmasrc[8] = {
 256	"No error",
 257	"AHB bus error",
 258	"Internal IP bus error",
 259	"Parity error",
 260	"DMA crosses 256 byte boundary",
 261	"DMA is busy",
 262	"Reserved",
 263	"DMA HW error"
 264};
 265
 266static char *sahara_cha_errsrc[12] = {
 267	"Input buffer non-empty",
 268	"Illegal address",
 269	"Illegal mode",
 270	"Illegal data size",
 271	"Illegal key size",
 272	"Write during processing",
 273	"CTX read during processing",
 274	"HW error",
 275	"Input buffer disabled/underflow",
 276	"Output buffer disabled/overflow",
 277	"DES key parity error",
 278	"Reserved"
 279};
 280
 281static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
 282
 283static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
 284{
 285	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
 286	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
 287
 288	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
 289
 290	dev_err(dev->device, "	- %s.\n", sahara_err_src[source]);
 291
 292	if (source == SAHARA_ERRSOURCE_DMA) {
 293		if (error & SAHARA_ERRSTATUS_DMA_DIR)
 294			dev_err(dev->device, "		* DMA read.\n");
 295		else
 296			dev_err(dev->device, "		* DMA write.\n");
 297
 298		dev_err(dev->device, "		* %s.\n",
 299		       sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
 300		dev_err(dev->device, "		* %s.\n",
 301		       sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
 302	} else if (source == SAHARA_ERRSOURCE_CHA) {
 303		dev_err(dev->device, "		* %s.\n",
 304			sahara_cha_errsrc[chasrc]);
 305		dev_err(dev->device, "		* %s.\n",
 306		       sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
 307	}
 308	dev_err(dev->device, "\n");
 309}
 310
 311static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
 312
 313static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
 314{
 315	u8 state;
 316
 317	if (!IS_ENABLED(DEBUG))
 318		return;
 319
 320	state = SAHARA_STATUS_GET_STATE(status);
 321
 322	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
 323		__func__, status);
 324
 325	dev_dbg(dev->device, "	- State = %d:\n", state);
 326	if (state & SAHARA_STATE_COMP_FLAG)
 327		dev_dbg(dev->device, "		* Descriptor completed. IRQ pending.\n");
 328
 329	dev_dbg(dev->device, "		* %s.\n",
 330	       sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
 331
 332	if (status & SAHARA_STATUS_DAR_FULL)
 333		dev_dbg(dev->device, "	- DAR Full.\n");
 334	if (status & SAHARA_STATUS_ERROR)
 335		dev_dbg(dev->device, "	- Error.\n");
 336	if (status & SAHARA_STATUS_SECURE)
 337		dev_dbg(dev->device, "	- Secure.\n");
 338	if (status & SAHARA_STATUS_FAIL)
 339		dev_dbg(dev->device, "	- Fail.\n");
 340	if (status & SAHARA_STATUS_RNG_RESEED)
 341		dev_dbg(dev->device, "	- RNG Reseed Request.\n");
 342	if (status & SAHARA_STATUS_ACTIVE_RNG)
 343		dev_dbg(dev->device, "	- RNG Active.\n");
 344	if (status & SAHARA_STATUS_ACTIVE_MDHA)
 345		dev_dbg(dev->device, "	- MDHA Active.\n");
 346	if (status & SAHARA_STATUS_ACTIVE_SKHA)
 347		dev_dbg(dev->device, "	- SKHA Active.\n");
 348
 349	if (status & SAHARA_STATUS_MODE_BATCH)
 350		dev_dbg(dev->device, "	- Batch Mode.\n");
 351	else if (status & SAHARA_STATUS_MODE_DEDICATED)
 352		dev_dbg(dev->device, "	- Dedicated Mode.\n");
 353	else if (status & SAHARA_STATUS_MODE_DEBUG)
 354		dev_dbg(dev->device, "	- Debug Mode.\n");
 355
 356	dev_dbg(dev->device, "	- Internal state = 0x%02x\n",
 357	       SAHARA_STATUS_GET_ISTATE(status));
 358
 359	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
 360		sahara_read(dev, SAHARA_REG_CDAR));
 361	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
 362		sahara_read(dev, SAHARA_REG_IDAR));
 363}
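/*
 * All of the dev_dbg() tracing in this file (and the IS_ENABLED(DEBUG)
 * guards above) is compiled out unless DEBUG is defined for this
 * translation unit. A sketch of the usual ways to enable it, assuming
 * a standard kbuild setup:
 *
 *	# in the Makefile, for this object only:
 *	CFLAGS_sahara.o := -DDEBUG
 *
 *	# or at run time, with CONFIG_DYNAMIC_DEBUG=y:
 *	echo 'file sahara.c +p' > /sys/kernel/debug/dynamic_debug/control
 */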
 364
 365static void sahara_dump_descriptors(struct sahara_dev *dev)
 366{
 367	int i;
 368
 369	if (!IS_ENABLED(DEBUG))
 370		return;
 371
 372	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
 373		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
 374			i, dev->hw_phys_desc[i]);
 375		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
 376		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
 377		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
 378		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
 379		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
 380		dev_dbg(dev->device, "\tnext = 0x%08x\n",
 381			dev->hw_desc[i]->next);
 382	}
 383	dev_dbg(dev->device, "\n");
 384}
 385
 386static void sahara_dump_links(struct sahara_dev *dev)
 387{
 388	int i;
 389
 390	if (!IS_ENABLED(DEBUG))
 391		return;
 392
 393	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
 394		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
 395			i, dev->hw_phys_link[i]);
 396		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
 397		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
 398		dev_dbg(dev->device, "\tnext = 0x%08x\n",
 399			dev->hw_link[i]->next);
 400	}
 401	dev_dbg(dev->device, "\n");
 402}
 403
 404static void sahara_aes_done_task(unsigned long data)
 405{
 406	struct sahara_dev *dev = (struct sahara_dev *)data;
 407
 408	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 409		DMA_FROM_DEVICE);
 410	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 411		DMA_TO_DEVICE);
 412
 413	spin_lock(&dev->lock);
 414	clear_bit(FLAGS_BUSY, &dev->flags);
 415	spin_unlock(&dev->lock);
 416
 417	dev->req->base.complete(&dev->req->base, dev->error);
 418}
 419
 420static void sahara_watchdog(unsigned long data)
 421{
 422	struct sahara_dev *dev = (struct sahara_dev *)data;
 423	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
 424	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
 425
 426	sahara_decode_status(dev, stat);
 427	sahara_decode_error(dev, err);
 428	dev->error = -ETIMEDOUT;
 429	sahara_aes_done_task(data);
 430}
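/*
 * The watchdog uses the pre-v4.15 timer API (init_timer() plus an
 * unsigned long data cookie, see sahara_probe()). On kernels that have
 * timer_setup()/from_timer(), the equivalent would look roughly like
 * this sketch (not the driver's actual code):
 *
 *	static void sahara_watchdog(struct timer_list *t)
 *	{
 *		struct sahara_dev *dev = from_timer(dev, t, watchdog);
 *		...
 *	}
 *
 *	// in sahara_probe():
 *	timer_setup(&dev->watchdog, sahara_watchdog, 0);
 */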
 431
 432static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 433{
 434	struct sahara_ctx *ctx = dev->ctx;
 435	struct scatterlist *sg;
 436	int ret;
 437	int i, j;
 438
 439	/* Copy new key if necessary */
 440	if (ctx->flags & FLAGS_NEW_KEY) {
 441		memcpy(dev->key_base, ctx->key, ctx->keylen);
 442		ctx->flags &= ~FLAGS_NEW_KEY;
 443
 444		if (dev->flags & FLAGS_CBC) {
 445			dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
 446			dev->hw_desc[0]->p1 = dev->iv_phys_base;
 447		} else {
 448			dev->hw_desc[0]->len1 = 0;
 449			dev->hw_desc[0]->p1 = 0;
 450		}
 451		dev->hw_desc[0]->len2 = ctx->keylen;
 452		dev->hw_desc[0]->p2 = dev->key_phys_base;
 453		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
 454	}
 455	dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);
 456
 457	dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
 458	dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
 459	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
 460		dev_err(dev->device, "not enough hw links (%d)\n",
 461			dev->nb_in_sg + dev->nb_out_sg);
 462		return -EINVAL;
 463	}
 464
 465	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 466			 DMA_TO_DEVICE);
 467	if (ret != dev->nb_in_sg) {
 468		dev_err(dev->device, "couldn't map in sg\n");
 469		return -EINVAL;
 470	}
 471	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
 472			 DMA_FROM_DEVICE);
 473	if (ret != dev->nb_out_sg) {
 474		dev_err(dev->device, "couldn't map out sg\n");
 475		goto unmap_in;
 476	}
 477
 478	/* Create input links */
 479	dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
 480	sg = dev->in_sg;
 481	for (i = 0; i < dev->nb_in_sg; i++) {
 482		dev->hw_link[i]->len = sg->length;
 483		dev->hw_link[i]->p = sg->dma_address;
 484		if (i == (dev->nb_in_sg - 1)) {
 485			dev->hw_link[i]->next = 0;
 486		} else {
 487			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
 488			sg = sg_next(sg);
 489		}
 490	}
 491
 492	/* Create output links */
 493	dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
 494	sg = dev->out_sg;
 495	for (j = i; j < dev->nb_out_sg + i; j++) {
 496		dev->hw_link[j]->len = sg->length;
 497		dev->hw_link[j]->p = sg->dma_address;
 498		if (j == (dev->nb_out_sg + i - 1)) {
 499			dev->hw_link[j]->next = 0;
 500		} else {
 501			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
 502			sg = sg_next(sg);
 503		}
 504	}
 505
 506	/* Fill remaining fields of hw_desc[1] */
 507	dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
 508	dev->hw_desc[1]->len1 = dev->total;
 509	dev->hw_desc[1]->len2 = dev->total;
 510	dev->hw_desc[1]->next = 0;
 511
 512	sahara_dump_descriptors(dev);
 513	sahara_dump_links(dev);
 514
 515	/* Start processing descriptor chain. */
 516	mod_timer(&dev->watchdog,
 517		  jiffies + msecs_to_jiffies(SAHARA_TIMEOUT_MS));
 518	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
 519
 520	return 0;
 521
 525unmap_in:
 526	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 527		DMA_TO_DEVICE);
 528
 529	return -EINVAL;
 530}
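/*
 * For a CBC request with a fresh key, the chain handed to the DAR by
 * the function above looks like this (all addresses are the
 * dma_alloc_coherent() handles set up in sahara_probe()):
 *
 *	hw_desc[0] (key header)          hw_desc[1] (data header)
 *	  len1/p1 -> IV  (16 bytes)        len1/p1 -> input link list
 *	  len2/p2 -> key (16 bytes)        len2/p2 -> output link list
 *	  next    -> hw_desc[1]            next    -> 0 (end of chain)
 *
 * Each link carries one scatterlist segment's DMA address and length,
 * and the watchdog armed just before the DAR write catches the case
 * where the completion interrupt never arrives.
 */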
 531
 532static void sahara_aes_queue_task(unsigned long data)
 533{
 534	struct sahara_dev *dev = (struct sahara_dev *)data;
 535	struct crypto_async_request *async_req, *backlog;
 536	struct sahara_ctx *ctx;
 537	struct sahara_aes_reqctx *rctx;
 538	struct ablkcipher_request *req;
 539	int ret;
 540
 541	spin_lock(&dev->lock);
 542	backlog = crypto_get_backlog(&dev->queue);
 543	async_req = crypto_dequeue_request(&dev->queue);
 544	if (!async_req)
 545		clear_bit(FLAGS_BUSY, &dev->flags);
 546	spin_unlock(&dev->lock);
 547
 548	if (!async_req)
 549		return;
 550
 551	if (backlog)
 552		backlog->complete(backlog, -EINPROGRESS);
 553
 554	req = ablkcipher_request_cast(async_req);
 555
 556	/* Request is ready to be dispatched by the device */
 557	dev_dbg(dev->device,
 558		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
 559		req->nbytes, req->src, req->dst);
 560
 561	/* assign new request to device */
 562	dev->req = req;
 563	dev->total = req->nbytes;
 564	dev->in_sg = req->src;
 565	dev->out_sg = req->dst;
 566
 567	rctx = ablkcipher_request_ctx(req);
 568	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
 569	rctx->mode &= FLAGS_MODE_MASK;
 570	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 571
 572	if ((dev->flags & FLAGS_CBC) && req->info)
 573		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);
 574
 575	/* assign new context to device */
 576	ctx->dev = dev;
 577	dev->ctx = ctx;
 578
 579	ret = sahara_hw_descriptor_create(dev);
 580	if (ret < 0) {
 581		spin_lock(&dev->lock);
 582		clear_bit(FLAGS_BUSY, &dev->flags);
 583		spin_unlock(&dev->lock);
 584		dev->req->base.complete(&dev->req->base, ret);
 585	}
 586}
 587
 588static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 589			     unsigned int keylen)
 590{
 591	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 592	int ret;
 593
 594	ctx->keylen = keylen;
 595
 596	/* SAHARA only supports 128-bit keys */
 597	if (keylen == AES_KEYSIZE_128) {
 598		memcpy(ctx->key, key, keylen);
 599		ctx->flags |= FLAGS_NEW_KEY;
 600		return 0;
 601	}
 602
 603	if (keylen != AES_KEYSIZE_128 &&
 604	    keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
 605		return -EINVAL;
 606
 607	/*
 608	 * The requested key size is not supported by HW, do a fallback.
 609	 */
 610	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
 611	ctx->fallback->base.crt_flags |=
 612		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 613
 614	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
 615	if (ret) {
 616		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
 617
 618		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
 619		tfm_aux->crt_flags |=
 620			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
 621	}
 622	return ret;
 623}
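/*
 * From user space the 128-bit limitation is invisible: larger keys
 * simply take the software fallback selected above. A minimal AF_ALG
 * sketch exercising this driver's "cbc(aes)" (the socket API is
 * generic, nothing here is sahara-specific):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type   = "skcipher",
 *		.salg_name   = "cbc(aes)",
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	// a 24- or 32-byte key would transparently hit the fallback
 */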
 624
 625static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 626{
 627	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 628		crypto_ablkcipher_reqtfm(req));
 629	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
 630	struct sahara_dev *dev = dev_ptr;
 631	int err = 0;
 632	int busy;
 633
 634	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
 635		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
 636
 637	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
 638		dev_err(dev->device,
 639			"request size is not a multiple of the AES block size\n");
 640		return -EINVAL;
 641	}
 642
 643	ctx->dev = dev;
 644
 645	rctx->mode = mode;
 646	spin_lock_bh(&dev->lock);
 647	err = ablkcipher_enqueue_request(&dev->queue, req);
 648	busy = test_and_set_bit(FLAGS_BUSY, &dev->flags);
 649	spin_unlock_bh(&dev->lock);
 650
 651	if (!busy)
 652		tasklet_schedule(&dev->queue_task);
 653
 654	return err;
 655}
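/*
 * Note the asynchronous calling convention: on success the request is
 * only queued here, so ablkcipher_enqueue_request() normally hands
 * back -EINPROGRESS (or -EBUSY when the queue is full; with
 * CRYPTO_TFM_REQ_MAY_BACKLOG set the request is still retained). The
 * real result is delivered later through req->base.complete() from
 * sahara_aes_done_task(). A typical caller therefore does roughly:
 *
 *	err = crypto_ablkcipher_encrypt(req);
 *	if (err == -EINPROGRESS || err == -EBUSY)
 *		err = my_wait_for_result(req);	// hypothetical helper
 */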
 656
 657static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
 658{
 659	struct crypto_tfm *tfm =
 660		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 661	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 662		crypto_ablkcipher_reqtfm(req));
 663	int err;
 664
 665	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 666		ablkcipher_request_set_tfm(req, ctx->fallback);
 667		err = crypto_ablkcipher_encrypt(req);
 668		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 669		return err;
 670	}
 671
 672	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
 673}
 674
 675static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
 676{
 677	struct crypto_tfm *tfm =
 678		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 679	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 680		crypto_ablkcipher_reqtfm(req));
 681	int err;
 682
 683	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 684		ablkcipher_request_set_tfm(req, ctx->fallback);
 685		err = crypto_ablkcipher_decrypt(req);
 686		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 687		return err;
 688	}
 689
 690	return sahara_aes_crypt(req, 0);
 691}
 692
 693static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
 694{
 695	struct crypto_tfm *tfm =
 696		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 697	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 698		crypto_ablkcipher_reqtfm(req));
 699	int err;
 700
 701	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 702		ablkcipher_request_set_tfm(req, ctx->fallback);
 703		err = crypto_ablkcipher_encrypt(req);
 704		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 705		return err;
 706	}
 707
 708	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
 709}
 710
 711static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
 712{
 713	struct crypto_tfm *tfm =
 714		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 715	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
 716		crypto_ablkcipher_reqtfm(req));
 717	int err;
 718
 719	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
 720		ablkcipher_request_set_tfm(req, ctx->fallback);
 721		err = crypto_ablkcipher_decrypt(req);
 722		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 723		return err;
 724	}
 725
 726	return sahara_aes_crypt(req, FLAGS_CBC);
 727}
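/*
 * The four entry points above differ only in the mode flags passed to
 * sahara_aes_crypt() and in which fallback hook they call. A sketch of
 * a consolidated helper (a hypothetical refactoring, shown for
 * clarity; it is not part of this driver):
 *
 *	static int sahara_aes_fallback(struct ablkcipher_request *req,
 *				       bool encrypt)
 *	{
 *		struct crypto_tfm *tfm =
 *			crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 *		struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
 *		int err;
 *
 *		ablkcipher_request_set_tfm(req, ctx->fallback);
 *		err = encrypt ? crypto_ablkcipher_encrypt(req)
 *			      : crypto_ablkcipher_decrypt(req);
 *		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 *		return err;
 *	}
 */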
 728
 729static int sahara_aes_cra_init(struct crypto_tfm *tfm)
 730{
 731	const char *name = tfm->__crt_alg->cra_name;
 732	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
 733
 734	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
 735				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
 736	if (IS_ERR(ctx->fallback)) {
 737		pr_err("Error allocating fallback algo %s\n", name);
 738		return PTR_ERR(ctx->fallback);
 739	}
 740
 741	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
 742
 743	return 0;
 744}
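/*
 * crypto_alloc_ablkcipher() was removed from later kernels; the modern
 * replacement allocates a crypto_skcipher fallback via
 * <crypto/skcipher.h> instead. Under that newer API the allocation
 * above would read roughly (a sketch, not this driver's code):
 *
 *	ctx->fallback = crypto_alloc_skcipher(name, 0,
 *					      CRYPTO_ALG_NEED_FALLBACK);
 *	if (IS_ERR(ctx->fallback))
 *		return PTR_ERR(ctx->fallback);
 */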
 745
 746static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
 747{
 748	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
 749
 750	if (ctx->fallback)
 751		crypto_free_ablkcipher(ctx->fallback);
 752	ctx->fallback = NULL;
 753}
 754
 755static struct crypto_alg aes_algs[] = {
 756{
 757	.cra_name		= "ecb(aes)",
 758	.cra_driver_name	= "sahara-ecb-aes",
 759	.cra_priority		= 300,
 760	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 761			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
 762	.cra_blocksize		= AES_BLOCK_SIZE,
 763	.cra_ctxsize		= sizeof(struct sahara_ctx),
 764	.cra_alignmask		= 0x0,
 765	.cra_type		= &crypto_ablkcipher_type,
 766	.cra_module		= THIS_MODULE,
 767	.cra_init		= sahara_aes_cra_init,
 768	.cra_exit		= sahara_aes_cra_exit,
 769	.cra_u.ablkcipher = {
 770		.min_keysize	= AES_MIN_KEY_SIZE,
 771		.max_keysize	= AES_MAX_KEY_SIZE,
 772		.setkey		= sahara_aes_setkey,
 773		.encrypt	= sahara_aes_ecb_encrypt,
 774		.decrypt	= sahara_aes_ecb_decrypt,
 775	}
 776}, {
 777	.cra_name		= "cbc(aes)",
 778	.cra_driver_name	= "sahara-cbc-aes",
 779	.cra_priority		= 300,
 780	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 781			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
 782	.cra_blocksize		= AES_BLOCK_SIZE,
 783	.cra_ctxsize		= sizeof(struct sahara_ctx),
 784	.cra_alignmask		= 0x0,
 785	.cra_type		= &crypto_ablkcipher_type,
 786	.cra_module		= THIS_MODULE,
 787	.cra_init		= sahara_aes_cra_init,
 788	.cra_exit		= sahara_aes_cra_exit,
 789	.cra_u.ablkcipher = {
 790		.min_keysize	= AES_MIN_KEY_SIZE,
 791		.max_keysize	= AES_MAX_KEY_SIZE,
 792		.ivsize		= AES_BLOCK_SIZE,
 793		.setkey		= sahara_aes_setkey,
 794		.encrypt	= sahara_aes_cbc_encrypt,
 795		.decrypt	= sahara_aes_cbc_decrypt,
 796	}
 797}
 798};
 799
 800static irqreturn_t sahara_irq_handler(int irq, void *data)
 801{
 802	struct sahara_dev *dev = (struct sahara_dev *)data;
 803	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
 804	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
 805
 806	del_timer(&dev->watchdog);
 807
 808	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
 809		     SAHARA_REG_CMD);
 810
 811	sahara_decode_status(dev, stat);
 812
 813	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
 814		return IRQ_NONE;
 815	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
 816		dev->error = 0;
 817	} else {
 818		sahara_decode_error(dev, err);
 819		dev->error = -EINVAL;
 820	}
 821
 822	tasklet_schedule(&dev->done_task);
 823
 824	return IRQ_HANDLED;
 825}
 826
 827
 828static int sahara_register_algs(struct sahara_dev *dev)
 829{
 830	int err, i, j;
 831
 832	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
 833		INIT_LIST_HEAD(&aes_algs[i].cra_list);
 834		err = crypto_register_alg(&aes_algs[i]);
 835		if (err)
 836			goto err_aes_algs;
 837	}
 838
 839	return 0;
 840
 841err_aes_algs:
 842	for (j = 0; j < i; j++)
 843		crypto_unregister_alg(&aes_algs[j]);
 844
 845	return err;
 846}
 847
 848static void sahara_unregister_algs(struct sahara_dev *dev)
 849{
 850	int i;
 851
 852	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
 853		crypto_unregister_alg(&aes_algs[i]);
 854}
 855
 856static const struct platform_device_id sahara_platform_ids[] = {
 857	{ .name = "sahara-imx27" },
 858	{ /* sentinel */ }
 859};
 860MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
 861
 862static const struct of_device_id sahara_dt_ids[] = {
 863	{ .compatible = "fsl,imx27-sahara" },
 864	{ /* sentinel */ }
 865};
 866MODULE_DEVICE_TABLE(of, sahara_dt_ids);
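/*
 * A device tree node matching the table above would look roughly like
 * the sketch below. Only the compatible string and the "ipg"/"ahb"
 * clock names are dictated by this driver; the unit address, interrupt
 * and clock specifiers are illustrative (check the SoC's .dtsi for the
 * real values):
 *
 *	sahara: crypto@10025000 {
 *		compatible = "fsl,imx27-sahara";
 *		reg = <0x10025000 0x1000>;
 *		interrupts = <59>;
 *		clocks = <&clks 32>, <&clks 33>;
 *		clock-names = "ipg", "ahb";
 *	};
 */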
 867
 868static int sahara_probe(struct platform_device *pdev)
 869{
 870	struct sahara_dev *dev;
 871	struct resource *res;
 872	u32 version;
 873	int irq;
 874	int err;
 875	int i;
 876
 877	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
 878	if (dev == NULL) {
 879		dev_err(&pdev->dev, "unable to alloc data struct.\n");
 880		return -ENOMEM;
 881	}
 882
 883	dev->device = &pdev->dev;
 884	platform_set_drvdata(pdev, dev);
 885
 886	/* Get the base address */
 887	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 888	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
 889	if (IS_ERR(dev->regs_base))
 890		return PTR_ERR(dev->regs_base);
 891
 892	/* Get the IRQ */
 893	irq = platform_get_irq(pdev, 0);
 894	if (irq < 0) {
 895		dev_err(&pdev->dev, "failed to get irq resource\n");
 896		return irq;
 897	}
 898
 899	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
 900			       0, dev_name(&pdev->dev), dev);
 901	if (err) {
 902		dev_err(&pdev->dev, "failed to request irq\n");
 903		return err;
 904	}
 905
 906	/* clocks */
 907	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
 908	if (IS_ERR(dev->clk_ipg)) {
 909		dev_err(&pdev->dev, "Could not get ipg clock\n");
 910		return PTR_ERR(dev->clk_ipg);
 911	}
 912
 913	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
 914	if (IS_ERR(dev->clk_ahb)) {
 915		dev_err(&pdev->dev, "Could not get ahb clock\n");
 916		return PTR_ERR(dev->clk_ahb);
 917	}
 918
 919	/* Allocate HW descriptors */
 920	dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
 921			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
 922			&dev->hw_phys_desc[0], GFP_KERNEL);
 923	if (!dev->hw_desc[0]) {
 924		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
 925		return -ENOMEM;
 926	}
 927	dev->hw_desc[1] = dev->hw_desc[0] + 1;
 928	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
 929				sizeof(struct sahara_hw_desc);
 930
 931	/* Allocate space for iv and key */
 932	dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
 933				&dev->key_phys_base, GFP_KERNEL);
 934	if (!dev->key_base) {
 935		dev_err(&pdev->dev, "Could not allocate memory for key\n");
 936		err = -ENOMEM;
 937		goto err_key;
 938	}
 939	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
 940	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
 941
 942	/* Allocate space for HW links */
 943	dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
 944			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
 945			&dev->hw_phys_link[0], GFP_KERNEL);
 946	if (!dev->hw_link[0]) {
 947		dev_err(&pdev->dev, "Could not allocate hw links\n");
 948		err = -ENOMEM;
 949		goto err_link;
 950	}
 951	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
 952		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
 953					sizeof(struct sahara_hw_link);
 954		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
 955	}
 956
 957	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
 958
 959	dev_ptr = dev;
 960
 961	tasklet_init(&dev->queue_task, sahara_aes_queue_task,
 962		     (unsigned long)dev);
 963	tasklet_init(&dev->done_task, sahara_aes_done_task,
 964		     (unsigned long)dev);
 965
 966	init_timer(&dev->watchdog);
 967	dev->watchdog.function = &sahara_watchdog;
 968	dev->watchdog.data = (unsigned long)dev;
 969
 970	clk_prepare_enable(dev->clk_ipg);
 971	clk_prepare_enable(dev->clk_ahb);
 972
 973	version = sahara_read(dev, SAHARA_REG_VERSION);
 974	if (version != SAHARA_VERSION_3) {
 975		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
 976			version);
 977		err = -ENODEV;
 978		goto err_algs;
 979	}
 980
 981	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
 982		     SAHARA_REG_CMD);
 983	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
 984			SAHARA_CONTROL_SET_MAXBURST(8) |
 985			SAHARA_CONTROL_RNG_AUTORSD |
 986			SAHARA_CONTROL_ENABLE_INT,
 987			SAHARA_REG_CONTROL);
 988
 989	err = sahara_register_algs(dev);
 990	if (err)
 991		goto err_algs;
 992
 993	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
 994
 995	return 0;
 996
 997err_algs:
 998	dma_free_coherent(&pdev->dev,
 999			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1000			  dev->hw_link[0], dev->hw_phys_link[0]);
1001	clk_disable_unprepare(dev->clk_ipg);
1002	clk_disable_unprepare(dev->clk_ahb);
1003	dev_ptr = NULL;
1004err_link:
1005	dma_free_coherent(&pdev->dev,
1006			  2 * AES_KEYSIZE_128,
1007			  dev->key_base, dev->key_phys_base);
1008err_key:
1009	dma_free_coherent(&pdev->dev,
1010			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1011			  dev->hw_desc[0], dev->hw_phys_desc[0]);
1012
1013	return err;
1014}
1015
1016static int sahara_remove(struct platform_device *pdev)
1017{
1018	struct sahara_dev *dev = platform_get_drvdata(pdev);
1019
1020	dma_free_coherent(&pdev->dev,
1021			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1022			  dev->hw_link[0], dev->hw_phys_link[0]);
1023	dma_free_coherent(&pdev->dev,
1024			  2 * AES_KEYSIZE_128,
1025			  dev->key_base, dev->key_phys_base);
1026	dma_free_coherent(&pdev->dev,
1027			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1028			  dev->hw_desc[0], dev->hw_phys_desc[0]);
1029
1030	tasklet_kill(&dev->done_task);
1031	tasklet_kill(&dev->queue_task);
1032
1033	sahara_unregister_algs(dev);
1034
1035	clk_disable_unprepare(dev->clk_ipg);
1036	clk_disable_unprepare(dev->clk_ahb);
1037
1038	dev_ptr = NULL;
1039
1040	return 0;
1041}
1042
1043static struct platform_driver sahara_driver = {
1044	.probe		= sahara_probe,
1045	.remove		= sahara_remove,
1046	.driver		= {
1047		.name	= SAHARA_NAME,
1048		.owner	= THIS_MODULE,
1049		.of_match_table = sahara_dt_ids,
1050	},
1051	.id_table = sahara_platform_ids,
1052};
1053
1054module_platform_driver(sahara_driver);
1055
1056MODULE_LICENSE("GPL");
1057MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
1058MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");