Linux Audio

Check our new training course

Loading...
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Freescale i.MX23/i.MX28 Data Co-Processor driver
   4 *
   5 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
 
 
 
 
 
 
 
   6 */
   7
 
   8#include <linux/dma-mapping.h>
   9#include <linux/interrupt.h>
  10#include <linux/io.h>
  11#include <linux/kernel.h>
  12#include <linux/kthread.h>
  13#include <linux/module.h>
  14#include <linux/of.h>
  15#include <linux/platform_device.h>
  16#include <linux/stmp_device.h>
  17#include <linux/clk.h>
  18
  19#include <crypto/aes.h>
  20#include <crypto/sha1.h>
  21#include <crypto/sha2.h>
  22#include <crypto/internal/hash.h>
  23#include <crypto/internal/skcipher.h>
  24#include <crypto/scatterwalk.h>
  25
  26#define DCP_MAX_CHANS	4
  27#define DCP_BUF_SZ	PAGE_SIZE
  28#define DCP_SHA_PAY_SZ  64
  29
  30#define DCP_ALIGNMENT	64
  31
  32/*
  33 * Null hashes to align with hw behavior on imx6sl and ull
  34 * these are flipped for consistency with hw output
  35 */
  36static const uint8_t sha1_null_hash[] =
  37	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
  38	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
  39
  40static const uint8_t sha256_null_hash[] =
  41	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
  42	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
  43	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
  44	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
  45
  46/* DCP DMA descriptor. */
  47struct dcp_dma_desc {
  48	uint32_t	next_cmd_addr;
  49	uint32_t	control0;
  50	uint32_t	control1;
  51	uint32_t	source;
  52	uint32_t	destination;
  53	uint32_t	size;
  54	uint32_t	payload;
  55	uint32_t	status;
  56};
  57
  58/* Coherent aligned block for bounce buffering. */
  59struct dcp_coherent_block {
  60	uint8_t			aes_in_buf[DCP_BUF_SZ];
  61	uint8_t			aes_out_buf[DCP_BUF_SZ];
  62	uint8_t			sha_in_buf[DCP_BUF_SZ];
  63	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];
  64
  65	uint8_t			aes_key[2 * AES_KEYSIZE_128];
  66
  67	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
  68};
  69
  70struct dcp {
  71	struct device			*dev;
  72	void __iomem			*base;
  73
  74	uint32_t			caps;
  75
  76	struct dcp_coherent_block	*coh;
  77
  78	struct completion		completion[DCP_MAX_CHANS];
  79	spinlock_t			lock[DCP_MAX_CHANS];
  80	struct task_struct		*thread[DCP_MAX_CHANS];
  81	struct crypto_queue		queue[DCP_MAX_CHANS];
  82	struct clk			*dcp_clk;
  83};
  84
  85enum dcp_chan {
  86	DCP_CHAN_HASH_SHA	= 0,
  87	DCP_CHAN_CRYPTO		= 2,
  88};
  89
  90struct dcp_async_ctx {
  91	/* Common context */
  92	enum dcp_chan	chan;
  93	uint32_t	fill;
  94
  95	/* SHA Hash-specific context */
  96	struct mutex			mutex;
  97	uint32_t			alg;
  98	unsigned int			hot:1;
  99
 100	/* Crypto-specific context */
 101	struct crypto_skcipher		*fallback;
 102	unsigned int			key_len;
 103	uint8_t				key[AES_KEYSIZE_128];
 104};
 105
 106struct dcp_aes_req_ctx {
 107	unsigned int	enc:1;
 108	unsigned int	ecb:1;
 109	struct skcipher_request fallback_req;	// keep at the end
 110};
 111
 112struct dcp_sha_req_ctx {
 113	unsigned int	init:1;
 114	unsigned int	fini:1;
 115};
 116
 117struct dcp_export_state {
 118	struct dcp_sha_req_ctx req_ctx;
 119	struct dcp_async_ctx async_ctx;
 120};
 121
 122/*
 123 * There can even be only one instance of the MXS DCP due to the
 124 * design of Linux Crypto API.
 125 */
 126static struct dcp *global_sdcp;
 127
 128/* DCP register layout. */
 129#define MXS_DCP_CTRL				0x00
 130#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
 131#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)
 132
 133#define MXS_DCP_STAT				0x10
 134#define MXS_DCP_STAT_CLR			0x18
 135#define MXS_DCP_STAT_IRQ_MASK			0xf
 136
 137#define MXS_DCP_CHANNELCTRL			0x20
 138#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff
 139
 140#define MXS_DCP_CAPABILITY1			0x40
 141#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
 142#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
 143#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)
 144
 145#define MXS_DCP_CONTEXT				0x50
 146
 147#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
 148
 149#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
 150
 151#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
 152#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))
 153
 154/* DMA descriptor bits. */
 155#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
 156#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
 157#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
 158#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
 159#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
 160#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
 161#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
 162#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
 163#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)
 164
 165#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
 166#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
 167#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
 168#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
 169#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
 170
 171static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 172{
 173	int dma_err;
 174	struct dcp *sdcp = global_sdcp;
 175	const int chan = actx->chan;
 176	uint32_t stat;
 177	unsigned long ret;
 178	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 
 179	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
 180					      DMA_TO_DEVICE);
 181
 182	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
 183	if (dma_err)
 184		return dma_err;
 185
 186	reinit_completion(&sdcp->completion[chan]);
 187
 188	/* Clear status register. */
 189	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
 190
 191	/* Load the DMA descriptor. */
 192	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
 193
 194	/* Increment the semaphore to start the DMA transfer. */
 195	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
 196
 197	ret = wait_for_completion_timeout(&sdcp->completion[chan],
 198					  msecs_to_jiffies(1000));
 199	if (!ret) {
 200		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
 201			chan, readl(sdcp->base + MXS_DCP_STAT));
 202		return -ETIMEDOUT;
 203	}
 204
 205	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
 206	if (stat & 0xff) {
 207		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
 208			chan, stat);
 209		return -EINVAL;
 210	}
 211
 212	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
 213
 214	return 0;
 215}
 216
 217/*
 218 * Encryption (AES128)
 219 */
 220static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
 221			   struct skcipher_request *req, int init)
 222{
 223	dma_addr_t key_phys, src_phys, dst_phys;
 224	struct dcp *sdcp = global_sdcp;
 225	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 226	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 227	int ret;
 228
 229	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
 230				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
 231	ret = dma_mapping_error(sdcp->dev, key_phys);
 232	if (ret)
 233		return ret;
 234
 235	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
 236				  DCP_BUF_SZ, DMA_TO_DEVICE);
 237	ret = dma_mapping_error(sdcp->dev, src_phys);
 238	if (ret)
 239		goto err_src;
 240
 241	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
 242				  DCP_BUF_SZ, DMA_FROM_DEVICE);
 243	ret = dma_mapping_error(sdcp->dev, dst_phys);
 244	if (ret)
 245		goto err_dst;
 246
 247	if (actx->fill % AES_BLOCK_SIZE) {
 248		dev_err(sdcp->dev, "Invalid block size!\n");
 249		ret = -EINVAL;
 250		goto aes_done_run;
 251	}
 252
 253	/* Fill in the DMA descriptor. */
 254	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 255		    MXS_DCP_CONTROL0_INTERRUPT |
 256		    MXS_DCP_CONTROL0_ENABLE_CIPHER;
 257
 258	/* Payload contains the key. */
 259	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
 260
 261	if (rctx->enc)
 262		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
 263	if (init)
 264		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
 265
 266	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
 267
 268	if (rctx->ecb)
 269		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
 270	else
 271		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 272
 273	desc->next_cmd_addr = 0;
 274	desc->source = src_phys;
 275	desc->destination = dst_phys;
 276	desc->size = actx->fill;
 277	desc->payload = key_phys;
 278	desc->status = 0;
 279
 280	ret = mxs_dcp_start_dma(actx);
 281
 282aes_done_run:
 283	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
 284err_dst:
 285	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 286err_src:
 287	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
 288			 DMA_TO_DEVICE);
 
 
 289
 290	return ret;
 291}
 292
 293static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 294{
 295	struct dcp *sdcp = global_sdcp;
 296
 297	struct skcipher_request *req = skcipher_request_cast(arq);
 298	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 299	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 300
 301	struct scatterlist *dst = req->dst;
 302	struct scatterlist *src = req->src;
 303	int dst_nents = sg_nents(dst);
 304
 305	const int out_off = DCP_BUF_SZ;
 306	uint8_t *in_buf = sdcp->coh->aes_in_buf;
 307	uint8_t *out_buf = sdcp->coh->aes_out_buf;
 308
 
 309	uint32_t dst_off = 0;
 310	uint8_t *src_buf = NULL;
 311	uint32_t last_out_len = 0;
 312
 313	uint8_t *key = sdcp->coh->aes_key;
 314
 315	int ret = 0;
 316	unsigned int i, len, clen, tlen = 0;
 
 317	int init = 0;
 318	bool limit_hit = false;
 319
 320	actx->fill = 0;
 321
 322	/* Copy the key from the temporary location. */
 323	memcpy(key, actx->key, actx->key_len);
 324
 325	if (!rctx->ecb) {
 326		/* Copy the CBC IV just past the key. */
 327		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
 328		/* CBC needs the INIT set. */
 329		init = 1;
 330	} else {
 331		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
 332	}
 333
 334	for_each_sg(req->src, src, sg_nents(req->src), i) {
 335		src_buf = sg_virt(src);
 336		len = sg_dma_len(src);
 337		tlen += len;
 338		limit_hit = tlen > req->cryptlen;
 339
 340		if (limit_hit)
 341			len = req->cryptlen - (tlen - len);
 342
 343		do {
 344			if (actx->fill + len > out_off)
 345				clen = out_off - actx->fill;
 346			else
 347				clen = len;
 348
 349			memcpy(in_buf + actx->fill, src_buf, clen);
 350			len -= clen;
 351			src_buf += clen;
 352			actx->fill += clen;
 353
 354			/*
 355			 * If we filled the buffer or this is the last SG,
 356			 * submit the buffer.
 357			 */
 358			if (actx->fill == out_off || sg_is_last(src) ||
 359			    limit_hit) {
 360				ret = mxs_dcp_run_aes(actx, req, init);
 361				if (ret)
 362					return ret;
 363				init = 0;
 364
 365				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
 366						     actx->fill, dst_off);
 367				dst_off += actx->fill;
 368				last_out_len = actx->fill;
 369				actx->fill = 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 370			}
 371		} while (len);
 372
 373		if (limit_hit)
 374			break;
 375	}
 376
 377	/* Copy the IV for CBC for chaining */
 378	if (!rctx->ecb) {
 379		if (rctx->enc)
 380			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
 381				AES_BLOCK_SIZE);
 382		else
 383			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
 384				AES_BLOCK_SIZE);
 385	}
 386
 387	return ret;
 388}
 389
 390static int dcp_chan_thread_aes(void *data)
 391{
 392	struct dcp *sdcp = global_sdcp;
 393	const int chan = DCP_CHAN_CRYPTO;
 394
 395	struct crypto_async_request *backlog;
 396	struct crypto_async_request *arq;
 397
 398	int ret;
 399
 400	while (!kthread_should_stop()) {
 401		set_current_state(TASK_INTERRUPTIBLE);
 402
 403		spin_lock(&sdcp->lock[chan]);
 404		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 405		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 406		spin_unlock(&sdcp->lock[chan]);
 407
 408		if (!backlog && !arq) {
 409			schedule();
 410			continue;
 411		}
 412
 413		set_current_state(TASK_RUNNING);
 414
 415		if (backlog)
 416			crypto_request_complete(backlog, -EINPROGRESS);
 417
 418		if (arq) {
 419			ret = mxs_dcp_aes_block_crypt(arq);
 420			crypto_request_complete(arq, ret);
 
 421		}
 422	}
 
 
 423
 424	return 0;
 425}
 426
 427static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
 428{
 429	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 430	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 431	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 432	int ret;
 433
 434	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 435	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
 436				      req->base.complete, req->base.data);
 437	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
 438				   req->cryptlen, req->iv);
 439
 440	if (enc)
 441		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
 442	else
 443		ret = crypto_skcipher_decrypt(&rctx->fallback_req);
 
 
 444
 445	return ret;
 446}
 447
 448static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
 449{
 450	struct dcp *sdcp = global_sdcp;
 451	struct crypto_async_request *arq = &req->base;
 452	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 453	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 454	int ret;
 455
 456	if (unlikely(actx->key_len != AES_KEYSIZE_128))
 457		return mxs_dcp_block_fallback(req, enc);
 458
 459	rctx->enc = enc;
 460	rctx->ecb = ecb;
 461	actx->chan = DCP_CHAN_CRYPTO;
 462
 463	spin_lock(&sdcp->lock[actx->chan]);
 464	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 465	spin_unlock(&sdcp->lock[actx->chan]);
 466
 467	wake_up_process(sdcp->thread[actx->chan]);
 468
 469	return ret;
 470}
 471
 472static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
 473{
 474	return mxs_dcp_aes_enqueue(req, 0, 1);
 475}
 476
 477static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
 478{
 479	return mxs_dcp_aes_enqueue(req, 1, 1);
 480}
 481
 482static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
 483{
 484	return mxs_dcp_aes_enqueue(req, 0, 0);
 485}
 486
 487static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
 488{
 489	return mxs_dcp_aes_enqueue(req, 1, 0);
 490}
 491
 492static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 493			      unsigned int len)
 494{
 495	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 
 496
 497	/*
 498	 * AES 128 is supposed by the hardware, store key into temporary
 499	 * buffer and exit. We must use the temporary buffer here, since
 500	 * there can still be an operation in progress.
 501	 */
 502	actx->key_len = len;
 503	if (len == AES_KEYSIZE_128) {
 504		memcpy(actx->key, key, len);
 505		return 0;
 506	}
 507
 
 
 
 
 
 
 508	/*
 509	 * If the requested AES key size is not supported by the hardware,
 510	 * but is supported by in-kernel software implementation, we use
 511	 * software fallback.
 512	 */
 513	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
 514	crypto_skcipher_set_flags(actx->fallback,
 515				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 516	return crypto_skcipher_setkey(actx->fallback, key, len);
 
 
 
 
 
 
 
 
 
 517}
 518
 519static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
 520{
 521	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
 522	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 523	struct crypto_skcipher *blk;
 
 524
 525	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 526	if (IS_ERR(blk))
 527		return PTR_ERR(blk);
 528
 529	actx->fallback = blk;
 530	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
 531					 crypto_skcipher_reqsize(blk));
 532	return 0;
 533}
 534
 535static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
 536{
 537	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 538
 539	crypto_free_skcipher(actx->fallback);
 
 540}
 541
 542/*
 543 * Hashing (SHA1/SHA256)
 544 */
 545static int mxs_dcp_run_sha(struct ahash_request *req)
 546{
 547	struct dcp *sdcp = global_sdcp;
 548	int ret;
 549
 550	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 551	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 552	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 
 
 553	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 554
 555	dma_addr_t digest_phys = 0;
 556	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
 557					     DCP_BUF_SZ, DMA_TO_DEVICE);
 558
 559	ret = dma_mapping_error(sdcp->dev, buf_phys);
 560	if (ret)
 561		return ret;
 562
 563	/* Fill in the DMA descriptor. */
 564	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 565		    MXS_DCP_CONTROL0_INTERRUPT |
 566		    MXS_DCP_CONTROL0_ENABLE_HASH;
 567	if (rctx->init)
 568		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
 569
 570	desc->control1 = actx->alg;
 571	desc->next_cmd_addr = 0;
 572	desc->source = buf_phys;
 573	desc->destination = 0;
 574	desc->size = actx->fill;
 575	desc->payload = 0;
 576	desc->status = 0;
 577
 578	/*
 579	 * Align driver with hw behavior when generating null hashes
 580	 */
 581	if (rctx->init && rctx->fini && desc->size == 0) {
 582		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 583		const uint8_t *sha_buf =
 584			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
 585			sha1_null_hash : sha256_null_hash;
 586		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
 587		ret = 0;
 588		goto done_run;
 589	}
 590
 591	/* Set HASH_TERM bit for last transfer block. */
 592	if (rctx->fini) {
 593		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
 594					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
 595		ret = dma_mapping_error(sdcp->dev, digest_phys);
 596		if (ret)
 597			goto done_run;
 598
 599		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
 600		desc->payload = digest_phys;
 601	}
 602
 603	ret = mxs_dcp_start_dma(actx);
 604
 605	if (rctx->fini)
 606		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
 607				 DMA_FROM_DEVICE);
 608
 609done_run:
 610	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 611
 612	return ret;
 613}
 614
 615static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 616{
 617	struct dcp *sdcp = global_sdcp;
 618
 619	struct ahash_request *req = ahash_request_cast(arq);
 620	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 621	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 622	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 623	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 
 624
 625	uint8_t *in_buf = sdcp->coh->sha_in_buf;
 626	uint8_t *out_buf = sdcp->coh->sha_out_buf;
 
 627
 628	struct scatterlist *src;
 629
 630	unsigned int i, len, clen, oft = 0;
 631	int ret;
 632
 633	int fin = rctx->fini;
 634	if (fin)
 635		rctx->fini = 0;
 636
 637	src = req->src;
 638	len = req->nbytes;
 
 639
 640	while (len) {
 641		if (actx->fill + len > DCP_BUF_SZ)
 642			clen = DCP_BUF_SZ - actx->fill;
 643		else
 644			clen = len;
 645
 646		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
 647					 0);
 648
 649		len -= clen;
 650		oft += clen;
 651		actx->fill += clen;
 652
 653		/*
 654		 * If we filled the buffer and still have some
 655		 * more data, submit the buffer.
 656		 */
 657		if (len && actx->fill == DCP_BUF_SZ) {
 658			ret = mxs_dcp_run_sha(req);
 659			if (ret)
 660				return ret;
 661			actx->fill = 0;
 662			rctx->init = 0;
 663		}
 664	}
 665
 666	if (fin) {
 667		rctx->fini = 1;
 668
 669		/* Submit whatever is left. */
 670		if (!req->result)
 671			return -EINVAL;
 672
 673		ret = mxs_dcp_run_sha(req);
 674		if (ret)
 675			return ret;
 676
 677		actx->fill = 0;
 678
 679		/* For some reason the result is flipped */
 680		for (i = 0; i < halg->digestsize; i++)
 681			req->result[i] = out_buf[halg->digestsize - i - 1];
 
 
 682	}
 683
 684	return 0;
 685}
 686
 687static int dcp_chan_thread_sha(void *data)
 688{
 689	struct dcp *sdcp = global_sdcp;
 690	const int chan = DCP_CHAN_HASH_SHA;
 691
 692	struct crypto_async_request *backlog;
 693	struct crypto_async_request *arq;
 694	int ret;
 695
 696	while (!kthread_should_stop()) {
 697		set_current_state(TASK_INTERRUPTIBLE);
 698
 699		spin_lock(&sdcp->lock[chan]);
 700		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 701		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 702		spin_unlock(&sdcp->lock[chan]);
 703
 704		if (!backlog && !arq) {
 705			schedule();
 706			continue;
 707		}
 708
 709		set_current_state(TASK_RUNNING);
 
 
 
 710
 711		if (backlog)
 712			crypto_request_complete(backlog, -EINPROGRESS);
 713
 714		if (arq) {
 
 
 
 715			ret = dcp_sha_req_to_buf(arq);
 716			crypto_request_complete(arq, ret);
 
 
 
 717		}
 718	}
 
 
 719
 720	return 0;
 721}
 722
 723static int dcp_sha_init(struct ahash_request *req)
 724{
 725	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 726	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 727
 728	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 729
 730	/*
 731	 * Start hashing session. The code below only inits the
 732	 * hashing session context, nothing more.
 733	 */
 734	memset(actx, 0, sizeof(*actx));
 735
 736	if (strcmp(halg->base.cra_name, "sha1") == 0)
 737		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
 738	else
 739		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
 740
 741	actx->fill = 0;
 742	actx->hot = 0;
 743	actx->chan = DCP_CHAN_HASH_SHA;
 744
 745	mutex_init(&actx->mutex);
 746
 747	return 0;
 748}
 749
 750static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 751{
 752	struct dcp *sdcp = global_sdcp;
 753
 754	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 755	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 756	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 757
 758	int ret;
 759
 760	/*
 761	 * Ignore requests that have no data in them and are not
 762	 * the trailing requests in the stream of requests.
 763	 */
 764	if (!req->nbytes && !fini)
 765		return 0;
 766
 767	mutex_lock(&actx->mutex);
 768
 769	rctx->fini = fini;
 770
 771	if (!actx->hot) {
 772		actx->hot = 1;
 773		rctx->init = 1;
 774	}
 775
 776	spin_lock(&sdcp->lock[actx->chan]);
 777	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 778	spin_unlock(&sdcp->lock[actx->chan]);
 779
 780	wake_up_process(sdcp->thread[actx->chan]);
 781	mutex_unlock(&actx->mutex);
 782
 783	return ret;
 784}
 785
 786static int dcp_sha_update(struct ahash_request *req)
 787{
 788	return dcp_sha_update_fx(req, 0);
 789}
 790
 791static int dcp_sha_final(struct ahash_request *req)
 792{
 793	ahash_request_set_crypt(req, NULL, req->result, 0);
 794	req->nbytes = 0;
 795	return dcp_sha_update_fx(req, 1);
 796}
 797
 798static int dcp_sha_finup(struct ahash_request *req)
 799{
 800	return dcp_sha_update_fx(req, 1);
 801}
 802
 803static int dcp_sha_digest(struct ahash_request *req)
 804{
 805	int ret;
 806
 807	ret = dcp_sha_init(req);
 808	if (ret)
 809		return ret;
 810
 811	return dcp_sha_finup(req);
 812}
 813
 814static int dcp_sha_import(struct ahash_request *req, const void *in)
 815{
 816	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 817	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 818	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 819	const struct dcp_export_state *export = in;
 820
 821	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
 822	memset(actx, 0, sizeof(struct dcp_async_ctx));
 823	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
 824	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
 825
 826	return 0;
 827}
 828
 829static int dcp_sha_export(struct ahash_request *req, void *out)
 830{
 831	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
 832	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 833	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
 834	struct dcp_export_state *export = out;
 835
 836	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
 837	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
 838
 839	return 0;
 840}
 841
 842static int dcp_sha_cra_init(struct crypto_tfm *tfm)
 843{
 844	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 845				 sizeof(struct dcp_sha_req_ctx));
 846	return 0;
 847}
 848
 849static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
 850{
 851}
 852
 853/* AES 128 ECB and AES 128 CBC */
 854static struct skcipher_alg dcp_aes_algs[] = {
 855	{
 856		.base.cra_name		= "ecb(aes)",
 857		.base.cra_driver_name	= "ecb-aes-dcp",
 858		.base.cra_priority	= 400,
 859		.base.cra_alignmask	= 15,
 860		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 
 861					  CRYPTO_ALG_NEED_FALLBACK,
 862		.base.cra_blocksize	= AES_BLOCK_SIZE,
 863		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
 864		.base.cra_module	= THIS_MODULE,
 865
 866		.min_keysize		= AES_MIN_KEY_SIZE,
 867		.max_keysize		= AES_MAX_KEY_SIZE,
 868		.setkey			= mxs_dcp_aes_setkey,
 869		.encrypt		= mxs_dcp_aes_ecb_encrypt,
 870		.decrypt		= mxs_dcp_aes_ecb_decrypt,
 871		.init			= mxs_dcp_aes_fallback_init_tfm,
 872		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 
 
 
 
 873	}, {
 874		.base.cra_name		= "cbc(aes)",
 875		.base.cra_driver_name	= "cbc-aes-dcp",
 876		.base.cra_priority	= 400,
 877		.base.cra_alignmask	= 15,
 878		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 
 879					  CRYPTO_ALG_NEED_FALLBACK,
 880		.base.cra_blocksize	= AES_BLOCK_SIZE,
 881		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
 882		.base.cra_module	= THIS_MODULE,
 883
 884		.min_keysize		= AES_MIN_KEY_SIZE,
 885		.max_keysize		= AES_MAX_KEY_SIZE,
 886		.setkey			= mxs_dcp_aes_setkey,
 887		.encrypt		= mxs_dcp_aes_cbc_encrypt,
 888		.decrypt		= mxs_dcp_aes_cbc_decrypt,
 889		.ivsize			= AES_BLOCK_SIZE,
 890		.init			= mxs_dcp_aes_fallback_init_tfm,
 891		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 
 
 
 
 892	},
 893};
 894
 895/* SHA1 */
 896static struct ahash_alg dcp_sha1_alg = {
 897	.init	= dcp_sha_init,
 898	.update	= dcp_sha_update,
 899	.final	= dcp_sha_final,
 900	.finup	= dcp_sha_finup,
 901	.digest	= dcp_sha_digest,
 902	.import = dcp_sha_import,
 903	.export = dcp_sha_export,
 904	.halg	= {
 905		.digestsize	= SHA1_DIGEST_SIZE,
 906		.statesize	= sizeof(struct dcp_export_state),
 907		.base		= {
 908			.cra_name		= "sha1",
 909			.cra_driver_name	= "sha1-dcp",
 910			.cra_priority		= 400,
 
 911			.cra_flags		= CRYPTO_ALG_ASYNC,
 912			.cra_blocksize		= SHA1_BLOCK_SIZE,
 913			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 914			.cra_module		= THIS_MODULE,
 915			.cra_init		= dcp_sha_cra_init,
 916			.cra_exit		= dcp_sha_cra_exit,
 917		},
 918	},
 919};
 920
 921/* SHA256 */
 922static struct ahash_alg dcp_sha256_alg = {
 923	.init	= dcp_sha_init,
 924	.update	= dcp_sha_update,
 925	.final	= dcp_sha_final,
 926	.finup	= dcp_sha_finup,
 927	.digest	= dcp_sha_digest,
 928	.import = dcp_sha_import,
 929	.export = dcp_sha_export,
 930	.halg	= {
 931		.digestsize	= SHA256_DIGEST_SIZE,
 932		.statesize	= sizeof(struct dcp_export_state),
 933		.base		= {
 934			.cra_name		= "sha256",
 935			.cra_driver_name	= "sha256-dcp",
 936			.cra_priority		= 400,
 
 937			.cra_flags		= CRYPTO_ALG_ASYNC,
 938			.cra_blocksize		= SHA256_BLOCK_SIZE,
 939			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 940			.cra_module		= THIS_MODULE,
 941			.cra_init		= dcp_sha_cra_init,
 942			.cra_exit		= dcp_sha_cra_exit,
 943		},
 944	},
 945};
 946
 947static irqreturn_t mxs_dcp_irq(int irq, void *context)
 948{
 949	struct dcp *sdcp = context;
 950	uint32_t stat;
 951	int i;
 952
 953	stat = readl(sdcp->base + MXS_DCP_STAT);
 954	stat &= MXS_DCP_STAT_IRQ_MASK;
 955	if (!stat)
 956		return IRQ_NONE;
 957
 958	/* Clear the interrupts. */
 959	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
 960
 961	/* Complete the DMA requests that finished. */
 962	for (i = 0; i < DCP_MAX_CHANS; i++)
 963		if (stat & (1 << i))
 964			complete(&sdcp->completion[i]);
 965
 966	return IRQ_HANDLED;
 967}
 968
 969static int mxs_dcp_probe(struct platform_device *pdev)
 970{
 971	struct device *dev = &pdev->dev;
 972	struct dcp *sdcp = NULL;
 973	int i, ret;
 
 
 974	int dcp_vmi_irq, dcp_irq;
 975
 976	if (global_sdcp) {
 977		dev_err(dev, "Only one DCP instance allowed!\n");
 978		return -ENODEV;
 979	}
 980
 
 981	dcp_vmi_irq = platform_get_irq(pdev, 0);
 982	if (dcp_vmi_irq < 0)
 983		return dcp_vmi_irq;
 984
 985	dcp_irq = platform_get_irq(pdev, 1);
 986	if (dcp_irq < 0)
 987		return dcp_irq;
 988
 989	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
 990	if (!sdcp)
 991		return -ENOMEM;
 992
 993	sdcp->dev = dev;
 994	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
 995	if (IS_ERR(sdcp->base))
 996		return PTR_ERR(sdcp->base);
 997
 998
 999	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
1000			       "dcp-vmi-irq", sdcp);
1001	if (ret) {
1002		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
1003		return ret;
1004	}
1005
1006	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
1007			       "dcp-irq", sdcp);
1008	if (ret) {
1009		dev_err(dev, "Failed to claim DCP IRQ!\n");
1010		return ret;
1011	}
1012
1013	/* Allocate coherent helper block. */
1014	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
1015				   GFP_KERNEL);
1016	if (!sdcp->coh)
1017		return -ENOMEM;
1018
1019	/* Re-align the structure so it fits the DCP constraints. */
1020	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
1021
1022	/* DCP clock is optional, only used on some SOCs */
1023	sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
1024	if (IS_ERR(sdcp->dcp_clk))
1025		return PTR_ERR(sdcp->dcp_clk);
1026
1027	/* Restart the DCP block. */
1028	ret = stmp_reset_block(sdcp->base);
1029	if (ret) {
1030		dev_err(dev, "Failed reset\n");
1031		return ret;
1032	}
1033
1034	/* Initialize control register. */
1035	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
1036	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
1037	       sdcp->base + MXS_DCP_CTRL);
1038
1039	/* Enable all DCP DMA channels. */
1040	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
1041	       sdcp->base + MXS_DCP_CHANNELCTRL);
1042
1043	/*
1044	 * We do not enable context switching. Give the context buffer a
1045	 * pointer to an illegal address so if context switching is
1046	 * inadvertantly enabled, the DCP will return an error instead of
1047	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
1048	 * address will do.
1049	 */
1050	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
1051	for (i = 0; i < DCP_MAX_CHANS; i++)
1052		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
1053	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
1054
1055	global_sdcp = sdcp;
1056
1057	platform_set_drvdata(pdev, sdcp);
1058
1059	for (i = 0; i < DCP_MAX_CHANS; i++) {
1060		spin_lock_init(&sdcp->lock[i]);
1061		init_completion(&sdcp->completion[i]);
1062		crypto_init_queue(&sdcp->queue[i], 50);
1063	}
1064
1065	/* Create the SHA and AES handler threads. */
1066	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
1067						      NULL, "mxs_dcp_chan/sha");
1068	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
1069		dev_err(dev, "Error starting SHA thread!\n");
1070		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
1071		return ret;
1072	}
1073
1074	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
1075						    NULL, "mxs_dcp_chan/aes");
1076	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
1077		dev_err(dev, "Error starting SHA thread!\n");
1078		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
1079		goto err_destroy_sha_thread;
1080	}
1081
1082	/* Register the various crypto algorithms. */
1083	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
1084
1085	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
1086		ret = crypto_register_skciphers(dcp_aes_algs,
1087						ARRAY_SIZE(dcp_aes_algs));
1088		if (ret) {
1089			/* Failed to register algorithm. */
1090			dev_err(dev, "Failed to register AES crypto!\n");
1091			goto err_destroy_aes_thread;
1092		}
1093	}
1094
1095	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
1096		ret = crypto_register_ahash(&dcp_sha1_alg);
1097		if (ret) {
1098			dev_err(dev, "Failed to register %s hash!\n",
1099				dcp_sha1_alg.halg.base.cra_name);
1100			goto err_unregister_aes;
1101		}
1102	}
1103
1104	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
1105		ret = crypto_register_ahash(&dcp_sha256_alg);
1106		if (ret) {
1107			dev_err(dev, "Failed to register %s hash!\n",
1108				dcp_sha256_alg.halg.base.cra_name);
1109			goto err_unregister_sha1;
1110		}
1111	}
1112
1113	return 0;
1114
1115err_unregister_sha1:
1116	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1117		crypto_unregister_ahash(&dcp_sha1_alg);
1118
1119err_unregister_aes:
1120	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1121		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1122
1123err_destroy_aes_thread:
1124	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1125
1126err_destroy_sha_thread:
1127	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1128
1129	return ret;
1130}
1131
1132static void mxs_dcp_remove(struct platform_device *pdev)
1133{
1134	struct dcp *sdcp = platform_get_drvdata(pdev);
1135
1136	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
1137		crypto_unregister_ahash(&dcp_sha256_alg);
1138
1139	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1140		crypto_unregister_ahash(&dcp_sha1_alg);
1141
1142	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1143		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1144
1145	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1146	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1147
1148	platform_set_drvdata(pdev, NULL);
1149
1150	global_sdcp = NULL;
 
 
1151}
1152
1153static const struct of_device_id mxs_dcp_dt_ids[] = {
1154	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
1155	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
1156	{ /* sentinel */ }
1157};
1158
1159MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
1160
1161static struct platform_driver mxs_dcp_driver = {
1162	.probe	= mxs_dcp_probe,
1163	.remove_new = mxs_dcp_remove,
1164	.driver	= {
1165		.name		= "mxs-dcp",
1166		.of_match_table	= mxs_dcp_dt_ids,
1167	},
1168};
1169
1170module_platform_driver(mxs_dcp_driver);
1171
1172MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
1173MODULE_DESCRIPTION("Freescale MXS DCP Driver");
1174MODULE_LICENSE("GPL");
1175MODULE_ALIAS("platform:mxs-dcp");
v4.6
 
   1/*
   2 * Freescale i.MX23/i.MX28 Data Co-Processor driver
   3 *
   4 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
   5 *
   6 * The code contained herein is licensed under the GNU General Public
   7 * License. You may obtain a copy of the GNU General Public License
   8 * Version 2 or later at the following locations:
   9 *
  10 * http://www.opensource.org/licenses/gpl-license.html
  11 * http://www.gnu.org/copyleft/gpl.html
  12 */
  13
  14#include <linux/crypto.h>
  15#include <linux/dma-mapping.h>
  16#include <linux/interrupt.h>
  17#include <linux/io.h>
  18#include <linux/kernel.h>
  19#include <linux/kthread.h>
  20#include <linux/module.h>
  21#include <linux/of.h>
  22#include <linux/platform_device.h>
  23#include <linux/stmp_device.h>
 
  24
  25#include <crypto/aes.h>
  26#include <crypto/sha.h>
 
  27#include <crypto/internal/hash.h>
 
 
  28
  29#define DCP_MAX_CHANS	4
  30#define DCP_BUF_SZ	PAGE_SIZE
 
  31
  32#define DCP_ALIGNMENT	64
  33
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  34/* DCP DMA descriptor. */
  35struct dcp_dma_desc {
  36	uint32_t	next_cmd_addr;
  37	uint32_t	control0;
  38	uint32_t	control1;
  39	uint32_t	source;
  40	uint32_t	destination;
  41	uint32_t	size;
  42	uint32_t	payload;
  43	uint32_t	status;
  44};
  45
  46/* Coherent aligned block for bounce buffering. */
  47struct dcp_coherent_block {
  48	uint8_t			aes_in_buf[DCP_BUF_SZ];
  49	uint8_t			aes_out_buf[DCP_BUF_SZ];
  50	uint8_t			sha_in_buf[DCP_BUF_SZ];
 
  51
  52	uint8_t			aes_key[2 * AES_KEYSIZE_128];
  53
  54	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
  55};
  56
  57struct dcp {
  58	struct device			*dev;
  59	void __iomem			*base;
  60
  61	uint32_t			caps;
  62
  63	struct dcp_coherent_block	*coh;
  64
  65	struct completion		completion[DCP_MAX_CHANS];
  66	struct mutex			mutex[DCP_MAX_CHANS];
  67	struct task_struct		*thread[DCP_MAX_CHANS];
  68	struct crypto_queue		queue[DCP_MAX_CHANS];
 
  69};
  70
  71enum dcp_chan {
  72	DCP_CHAN_HASH_SHA	= 0,
  73	DCP_CHAN_CRYPTO		= 2,
  74};
  75
  76struct dcp_async_ctx {
  77	/* Common context */
  78	enum dcp_chan	chan;
  79	uint32_t	fill;
  80
  81	/* SHA Hash-specific context */
  82	struct mutex			mutex;
  83	uint32_t			alg;
  84	unsigned int			hot:1;
  85
  86	/* Crypto-specific context */
  87	struct crypto_ablkcipher	*fallback;
  88	unsigned int			key_len;
  89	uint8_t				key[AES_KEYSIZE_128];
  90};
  91
  92struct dcp_aes_req_ctx {
  93	unsigned int	enc:1;
  94	unsigned int	ecb:1;
 
  95};
  96
  97struct dcp_sha_req_ctx {
  98	unsigned int	init:1;
  99	unsigned int	fini:1;
 100};
 101
 
 
 
 
 
 102/*
 103 * There can even be only one instance of the MXS DCP due to the
 104 * design of Linux Crypto API.
 105 */
 106static struct dcp *global_sdcp;
 107
 108/* DCP register layout. */
 109#define MXS_DCP_CTRL				0x00
 110#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
 111#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)
 112
 113#define MXS_DCP_STAT				0x10
 114#define MXS_DCP_STAT_CLR			0x18
 115#define MXS_DCP_STAT_IRQ_MASK			0xf
 116
 117#define MXS_DCP_CHANNELCTRL			0x20
 118#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff
 119
 120#define MXS_DCP_CAPABILITY1			0x40
 121#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
 122#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
 123#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)
 124
 125#define MXS_DCP_CONTEXT				0x50
 126
 127#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
 128
 129#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
 130
 131#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
 132#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))
 133
 134/* DMA descriptor bits. */
 135#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
 136#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
 137#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
 138#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
 139#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
 140#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
 141#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
 142#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
 143#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)
 144
 145#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
 146#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
 147#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
 148#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
 149#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
 150
 151static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 152{
 
 153	struct dcp *sdcp = global_sdcp;
 154	const int chan = actx->chan;
 155	uint32_t stat;
 156	unsigned long ret;
 157	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 158
 159	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
 160					      DMA_TO_DEVICE);
 161
 
 
 
 
 162	reinit_completion(&sdcp->completion[chan]);
 163
 164	/* Clear status register. */
 165	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
 166
 167	/* Load the DMA descriptor. */
 168	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
 169
 170	/* Increment the semaphore to start the DMA transfer. */
 171	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
 172
 173	ret = wait_for_completion_timeout(&sdcp->completion[chan],
 174					  msecs_to_jiffies(1000));
 175	if (!ret) {
 176		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
 177			chan, readl(sdcp->base + MXS_DCP_STAT));
 178		return -ETIMEDOUT;
 179	}
 180
 181	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
 182	if (stat & 0xff) {
 183		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
 184			chan, stat);
 185		return -EINVAL;
 186	}
 187
 188	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
 189
 190	return 0;
 191}
 192
 193/*
 194 * Encryption (AES128)
 195 */
 196static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
 197			   struct ablkcipher_request *req, int init)
 198{
 
 199	struct dcp *sdcp = global_sdcp;
 200	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 201	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
 202	int ret;
 203
 204	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
 205					     2 * AES_KEYSIZE_128,
 206					     DMA_TO_DEVICE);
 207	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
 208					     DCP_BUF_SZ, DMA_TO_DEVICE);
 209	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
 210					     DCP_BUF_SZ, DMA_FROM_DEVICE);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 211
 212	/* Fill in the DMA descriptor. */
 213	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 214		    MXS_DCP_CONTROL0_INTERRUPT |
 215		    MXS_DCP_CONTROL0_ENABLE_CIPHER;
 216
 217	/* Payload contains the key. */
 218	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
 219
 220	if (rctx->enc)
 221		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
 222	if (init)
 223		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
 224
 225	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
 226
 227	if (rctx->ecb)
 228		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
 229	else
 230		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 231
 232	desc->next_cmd_addr = 0;
 233	desc->source = src_phys;
 234	desc->destination = dst_phys;
 235	desc->size = actx->fill;
 236	desc->payload = key_phys;
 237	desc->status = 0;
 238
 239	ret = mxs_dcp_start_dma(actx);
 240
 
 
 
 
 
 241	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
 242			 DMA_TO_DEVICE);
 243	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 244	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
 245
 246	return ret;
 247}
 248
 249static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 250{
 251	struct dcp *sdcp = global_sdcp;
 252
 253	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
 254	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 255	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
 256
 257	struct scatterlist *dst = req->dst;
 258	struct scatterlist *src = req->src;
 259	const int nents = sg_nents(req->src);
 260
 261	const int out_off = DCP_BUF_SZ;
 262	uint8_t *in_buf = sdcp->coh->aes_in_buf;
 263	uint8_t *out_buf = sdcp->coh->aes_out_buf;
 264
 265	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
 266	uint32_t dst_off = 0;
 
 
 267
 268	uint8_t *key = sdcp->coh->aes_key;
 269
 270	int ret = 0;
 271	int split = 0;
 272	unsigned int i, len, clen, rem = 0;
 273	int init = 0;
 
 274
 275	actx->fill = 0;
 276
 277	/* Copy the key from the temporary location. */
 278	memcpy(key, actx->key, actx->key_len);
 279
 280	if (!rctx->ecb) {
 281		/* Copy the CBC IV just past the key. */
 282		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
 283		/* CBC needs the INIT set. */
 284		init = 1;
 285	} else {
 286		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
 287	}
 288
 289	for_each_sg(req->src, src, nents, i) {
 290		src_buf = sg_virt(src);
 291		len = sg_dma_len(src);
 
 
 
 
 
 292
 293		do {
 294			if (actx->fill + len > out_off)
 295				clen = out_off - actx->fill;
 296			else
 297				clen = len;
 298
 299			memcpy(in_buf + actx->fill, src_buf, clen);
 300			len -= clen;
 301			src_buf += clen;
 302			actx->fill += clen;
 303
 304			/*
 305			 * If we filled the buffer or this is the last SG,
 306			 * submit the buffer.
 307			 */
 308			if (actx->fill == out_off || sg_is_last(src)) {
 
 309				ret = mxs_dcp_run_aes(actx, req, init);
 310				if (ret)
 311					return ret;
 312				init = 0;
 313
 314				out_tmp = out_buf;
 315				while (dst && actx->fill) {
 316					if (!split) {
 317						dst_buf = sg_virt(dst);
 318						dst_off = 0;
 319					}
 320					rem = min(sg_dma_len(dst) - dst_off,
 321						  actx->fill);
 322
 323					memcpy(dst_buf + dst_off, out_tmp, rem);
 324					out_tmp += rem;
 325					dst_off += rem;
 326					actx->fill -= rem;
 327
 328					if (dst_off == sg_dma_len(dst)) {
 329						dst = sg_next(dst);
 330						split = 0;
 331					} else {
 332						split = 1;
 333					}
 334				}
 335			}
 336		} while (len);
 
 
 
 
 
 
 
 
 
 
 
 
 
 337	}
 338
 339	return ret;
 340}
 341
 342static int dcp_chan_thread_aes(void *data)
 343{
 344	struct dcp *sdcp = global_sdcp;
 345	const int chan = DCP_CHAN_CRYPTO;
 346
 347	struct crypto_async_request *backlog;
 348	struct crypto_async_request *arq;
 349
 350	int ret;
 351
 352	do {
 353		__set_current_state(TASK_INTERRUPTIBLE);
 354
 355		mutex_lock(&sdcp->mutex[chan]);
 356		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 357		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 358		mutex_unlock(&sdcp->mutex[chan]);
 
 
 
 
 
 
 
 359
 360		if (backlog)
 361			backlog->complete(backlog, -EINPROGRESS);
 362
 363		if (arq) {
 364			ret = mxs_dcp_aes_block_crypt(arq);
 365			arq->complete(arq, ret);
 366			continue;
 367		}
 368
 369		schedule();
 370	} while (!kthread_should_stop());
 371
 372	return 0;
 373}
 374
 375static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
 376{
 377	struct crypto_tfm *tfm =
 378		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
 379	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(
 380		crypto_ablkcipher_reqtfm(req));
 381	int ret;
 382
 383	ablkcipher_request_set_tfm(req, ctx->fallback);
 
 
 
 
 384
 385	if (enc)
 386		ret = crypto_ablkcipher_encrypt(req);
 387	else
 388		ret = crypto_ablkcipher_decrypt(req);
 389
 390	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
 391
 392	return ret;
 393}
 394
 395static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 396{
 397	struct dcp *sdcp = global_sdcp;
 398	struct crypto_async_request *arq = &req->base;
 399	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 400	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
 401	int ret;
 402
 403	if (unlikely(actx->key_len != AES_KEYSIZE_128))
 404		return mxs_dcp_block_fallback(req, enc);
 405
 406	rctx->enc = enc;
 407	rctx->ecb = ecb;
 408	actx->chan = DCP_CHAN_CRYPTO;
 409
 410	mutex_lock(&sdcp->mutex[actx->chan]);
 411	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 412	mutex_unlock(&sdcp->mutex[actx->chan]);
 413
 414	wake_up_process(sdcp->thread[actx->chan]);
 415
 416	return -EINPROGRESS;
 417}
 418
 419static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
 420{
 421	return mxs_dcp_aes_enqueue(req, 0, 1);
 422}
 423
 424static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
 425{
 426	return mxs_dcp_aes_enqueue(req, 1, 1);
 427}
 428
 429static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
 430{
 431	return mxs_dcp_aes_enqueue(req, 0, 0);
 432}
 433
 434static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
 435{
 436	return mxs_dcp_aes_enqueue(req, 1, 0);
 437}
 438
 439static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 440			      unsigned int len)
 441{
 442	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
 443	unsigned int ret;
 444
 445	/*
 446	 * AES 128 is supposed by the hardware, store key into temporary
 447	 * buffer and exit. We must use the temporary buffer here, since
 448	 * there can still be an operation in progress.
 449	 */
 450	actx->key_len = len;
 451	if (len == AES_KEYSIZE_128) {
 452		memcpy(actx->key, key, len);
 453		return 0;
 454	}
 455
 456	/* Check if the key size is supported by kernel at all. */
 457	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
 458		tfm->base.crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
 459		return -EINVAL;
 460	}
 461
 462	/*
 463	 * If the requested AES key size is not supported by the hardware,
 464	 * but is supported by in-kernel software implementation, we use
 465	 * software fallback.
 466	 */
 467	actx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
 468	actx->fallback->base.crt_flags |=
 469		tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK;
 470
 471	ret = crypto_ablkcipher_setkey(actx->fallback, key, len);
 472	if (!ret)
 473		return 0;
 474
 475	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
 476	tfm->base.crt_flags |=
 477		actx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK;
 478
 479	return ret;
 480}
 481
 482static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
 483{
 484	const char *name = crypto_tfm_alg_name(tfm);
 485	const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
 486	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
 487	struct crypto_ablkcipher *blk;
 488
 489	blk = crypto_alloc_ablkcipher(name, 0, flags);
 490	if (IS_ERR(blk))
 491		return PTR_ERR(blk);
 492
 493	actx->fallback = blk;
 494	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
 
 495	return 0;
 496}
 497
 498static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
 499{
 500	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
 501
 502	crypto_free_ablkcipher(actx->fallback);
 503	actx->fallback = NULL;
 504}
 505
 506/*
 507 * Hashing (SHA1/SHA256)
 508 */
 509static int mxs_dcp_run_sha(struct ahash_request *req)
 510{
 511	struct dcp *sdcp = global_sdcp;
 512	int ret;
 513
 514	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 515	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 516	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 517	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 518
 519	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 520
 521	dma_addr_t digest_phys = 0;
 522	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
 523					     DCP_BUF_SZ, DMA_TO_DEVICE);
 524
 
 
 
 
 525	/* Fill in the DMA descriptor. */
 526	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 527		    MXS_DCP_CONTROL0_INTERRUPT |
 528		    MXS_DCP_CONTROL0_ENABLE_HASH;
 529	if (rctx->init)
 530		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
 531
 532	desc->control1 = actx->alg;
 533	desc->next_cmd_addr = 0;
 534	desc->source = buf_phys;
 535	desc->destination = 0;
 536	desc->size = actx->fill;
 537	desc->payload = 0;
 538	desc->status = 0;
 539
 
 
 
 
 
 
 
 
 
 
 
 
 
 540	/* Set HASH_TERM bit for last transfer block. */
 541	if (rctx->fini) {
 542		digest_phys = dma_map_single(sdcp->dev, req->result,
 543					     halg->digestsize, DMA_FROM_DEVICE);
 
 
 
 
 544		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
 545		desc->payload = digest_phys;
 546	}
 547
 548	ret = mxs_dcp_start_dma(actx);
 549
 550	if (rctx->fini)
 551		dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
 552				 DMA_FROM_DEVICE);
 553
 
 554	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 555
 556	return ret;
 557}
 558
 559static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 560{
 561	struct dcp *sdcp = global_sdcp;
 562
 563	struct ahash_request *req = ahash_request_cast(arq);
 564	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 565	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 566	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 567	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 568	const int nents = sg_nents(req->src);
 569
 570	uint8_t *in_buf = sdcp->coh->sha_in_buf;
 571
 572	uint8_t *src_buf;
 573
 574	struct scatterlist *src;
 575
 576	unsigned int i, len, clen;
 577	int ret;
 578
 579	int fin = rctx->fini;
 580	if (fin)
 581		rctx->fini = 0;
 582
 583	for_each_sg(req->src, src, nents, i) {
 584		src_buf = sg_virt(src);
 585		len = sg_dma_len(src);
 586
 587		do {
 588			if (actx->fill + len > DCP_BUF_SZ)
 589				clen = DCP_BUF_SZ - actx->fill;
 590			else
 591				clen = len;
 592
 593			memcpy(in_buf + actx->fill, src_buf, clen);
 594			len -= clen;
 595			src_buf += clen;
 596			actx->fill += clen;
 597
 598			/*
 599			 * If we filled the buffer and still have some
 600			 * more data, submit the buffer.
 601			 */
 602			if (len && actx->fill == DCP_BUF_SZ) {
 603				ret = mxs_dcp_run_sha(req);
 604				if (ret)
 605					return ret;
 606				actx->fill = 0;
 607				rctx->init = 0;
 608			}
 609		} while (len);
 
 610	}
 611
 612	if (fin) {
 613		rctx->fini = 1;
 614
 615		/* Submit whatever is left. */
 616		if (!req->result)
 617			return -EINVAL;
 618
 619		ret = mxs_dcp_run_sha(req);
 620		if (ret)
 621			return ret;
 622
 623		actx->fill = 0;
 624
 625		/* For some reason, the result is flipped. */
 626		for (i = 0; i < halg->digestsize / 2; i++) {
 627			swap(req->result[i],
 628			     req->result[halg->digestsize - i - 1]);
 629		}
 630	}
 631
 632	return 0;
 633}
 634
 635static int dcp_chan_thread_sha(void *data)
 636{
 637	struct dcp *sdcp = global_sdcp;
 638	const int chan = DCP_CHAN_HASH_SHA;
 639
 640	struct crypto_async_request *backlog;
 641	struct crypto_async_request *arq;
 
 642
 643	struct dcp_sha_req_ctx *rctx;
 
 644
 645	struct ahash_request *req;
 646	int ret, fini;
 
 
 647
 648	do {
 649		__set_current_state(TASK_INTERRUPTIBLE);
 
 
 650
 651		mutex_lock(&sdcp->mutex[chan]);
 652		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 653		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 654		mutex_unlock(&sdcp->mutex[chan]);
 655
 656		if (backlog)
 657			backlog->complete(backlog, -EINPROGRESS);
 658
 659		if (arq) {
 660			req = ahash_request_cast(arq);
 661			rctx = ahash_request_ctx(req);
 662
 663			ret = dcp_sha_req_to_buf(arq);
 664			fini = rctx->fini;
 665			arq->complete(arq, ret);
 666			if (!fini)
 667				continue;
 668		}
 669
 670		schedule();
 671	} while (!kthread_should_stop());
 672
 673	return 0;
 674}
 675
 676static int dcp_sha_init(struct ahash_request *req)
 677{
 678	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 679	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 680
 681	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 682
 683	/*
 684	 * Start hashing session. The code below only inits the
 685	 * hashing session context, nothing more.
 686	 */
 687	memset(actx, 0, sizeof(*actx));
 688
 689	if (strcmp(halg->base.cra_name, "sha1") == 0)
 690		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
 691	else
 692		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
 693
 694	actx->fill = 0;
 695	actx->hot = 0;
 696	actx->chan = DCP_CHAN_HASH_SHA;
 697
 698	mutex_init(&actx->mutex);
 699
 700	return 0;
 701}
 702
 703static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 704{
 705	struct dcp *sdcp = global_sdcp;
 706
 707	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 708	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 709	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 710
 711	int ret;
 712
 713	/*
 714	 * Ignore requests that have no data in them and are not
 715	 * the trailing requests in the stream of requests.
 716	 */
 717	if (!req->nbytes && !fini)
 718		return 0;
 719
 720	mutex_lock(&actx->mutex);
 721
 722	rctx->fini = fini;
 723
 724	if (!actx->hot) {
 725		actx->hot = 1;
 726		rctx->init = 1;
 727	}
 728
 729	mutex_lock(&sdcp->mutex[actx->chan]);
 730	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 731	mutex_unlock(&sdcp->mutex[actx->chan]);
 732
 733	wake_up_process(sdcp->thread[actx->chan]);
 734	mutex_unlock(&actx->mutex);
 735
 736	return -EINPROGRESS;
 737}
 738
 739static int dcp_sha_update(struct ahash_request *req)
 740{
 741	return dcp_sha_update_fx(req, 0);
 742}
 743
 744static int dcp_sha_final(struct ahash_request *req)
 745{
 746	ahash_request_set_crypt(req, NULL, req->result, 0);
 747	req->nbytes = 0;
 748	return dcp_sha_update_fx(req, 1);
 749}
 750
 751static int dcp_sha_finup(struct ahash_request *req)
 752{
 753	return dcp_sha_update_fx(req, 1);
 754}
 755
 756static int dcp_sha_digest(struct ahash_request *req)
 757{
 758	int ret;
 759
 760	ret = dcp_sha_init(req);
 761	if (ret)
 762		return ret;
 763
 764	return dcp_sha_finup(req);
 765}
 766
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 767static int dcp_sha_cra_init(struct crypto_tfm *tfm)
 768{
 769	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 770				 sizeof(struct dcp_sha_req_ctx));
 771	return 0;
 772}
 773
 774static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
 775{
 776}
 777
 778/* AES 128 ECB and AES 128 CBC */
 779static struct crypto_alg dcp_aes_algs[] = {
 780	{
 781		.cra_name		= "ecb(aes)",
 782		.cra_driver_name	= "ecb-aes-dcp",
 783		.cra_priority		= 400,
 784		.cra_alignmask		= 15,
 785		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 786					  CRYPTO_ALG_ASYNC |
 787					  CRYPTO_ALG_NEED_FALLBACK,
 788		.cra_init		= mxs_dcp_aes_fallback_init,
 789		.cra_exit		= mxs_dcp_aes_fallback_exit,
 790		.cra_blocksize		= AES_BLOCK_SIZE,
 791		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 792		.cra_type		= &crypto_ablkcipher_type,
 793		.cra_module		= THIS_MODULE,
 794		.cra_u	= {
 795			.ablkcipher = {
 796				.min_keysize	= AES_MIN_KEY_SIZE,
 797				.max_keysize	= AES_MAX_KEY_SIZE,
 798				.setkey		= mxs_dcp_aes_setkey,
 799				.encrypt	= mxs_dcp_aes_ecb_encrypt,
 800				.decrypt	= mxs_dcp_aes_ecb_decrypt
 801			},
 802		},
 803	}, {
 804		.cra_name		= "cbc(aes)",
 805		.cra_driver_name	= "cbc-aes-dcp",
 806		.cra_priority		= 400,
 807		.cra_alignmask		= 15,
 808		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 809					  CRYPTO_ALG_ASYNC |
 810					  CRYPTO_ALG_NEED_FALLBACK,
 811		.cra_init		= mxs_dcp_aes_fallback_init,
 812		.cra_exit		= mxs_dcp_aes_fallback_exit,
 813		.cra_blocksize		= AES_BLOCK_SIZE,
 814		.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 815		.cra_type		= &crypto_ablkcipher_type,
 816		.cra_module		= THIS_MODULE,
 817		.cra_u = {
 818			.ablkcipher = {
 819				.min_keysize	= AES_MIN_KEY_SIZE,
 820				.max_keysize	= AES_MAX_KEY_SIZE,
 821				.setkey		= mxs_dcp_aes_setkey,
 822				.encrypt	= mxs_dcp_aes_cbc_encrypt,
 823				.decrypt	= mxs_dcp_aes_cbc_decrypt,
 824				.ivsize		= AES_BLOCK_SIZE,
 825			},
 826		},
 827	},
 828};
 829
 830/* SHA1 */
 831static struct ahash_alg dcp_sha1_alg = {
 832	.init	= dcp_sha_init,
 833	.update	= dcp_sha_update,
 834	.final	= dcp_sha_final,
 835	.finup	= dcp_sha_finup,
 836	.digest	= dcp_sha_digest,
 
 
 837	.halg	= {
 838		.digestsize	= SHA1_DIGEST_SIZE,
 
 839		.base		= {
 840			.cra_name		= "sha1",
 841			.cra_driver_name	= "sha1-dcp",
 842			.cra_priority		= 400,
 843			.cra_alignmask		= 63,
 844			.cra_flags		= CRYPTO_ALG_ASYNC,
 845			.cra_blocksize		= SHA1_BLOCK_SIZE,
 846			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 847			.cra_module		= THIS_MODULE,
 848			.cra_init		= dcp_sha_cra_init,
 849			.cra_exit		= dcp_sha_cra_exit,
 850		},
 851	},
 852};
 853
 854/* SHA256 */
 855static struct ahash_alg dcp_sha256_alg = {
 856	.init	= dcp_sha_init,
 857	.update	= dcp_sha_update,
 858	.final	= dcp_sha_final,
 859	.finup	= dcp_sha_finup,
 860	.digest	= dcp_sha_digest,
 
 
 861	.halg	= {
 862		.digestsize	= SHA256_DIGEST_SIZE,
 
 863		.base		= {
 864			.cra_name		= "sha256",
 865			.cra_driver_name	= "sha256-dcp",
 866			.cra_priority		= 400,
 867			.cra_alignmask		= 63,
 868			.cra_flags		= CRYPTO_ALG_ASYNC,
 869			.cra_blocksize		= SHA256_BLOCK_SIZE,
 870			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 871			.cra_module		= THIS_MODULE,
 872			.cra_init		= dcp_sha_cra_init,
 873			.cra_exit		= dcp_sha_cra_exit,
 874		},
 875	},
 876};
 877
 878static irqreturn_t mxs_dcp_irq(int irq, void *context)
 879{
 880	struct dcp *sdcp = context;
 881	uint32_t stat;
 882	int i;
 883
 884	stat = readl(sdcp->base + MXS_DCP_STAT);
 885	stat &= MXS_DCP_STAT_IRQ_MASK;
 886	if (!stat)
 887		return IRQ_NONE;
 888
 889	/* Clear the interrupts. */
 890	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
 891
 892	/* Complete the DMA requests that finished. */
 893	for (i = 0; i < DCP_MAX_CHANS; i++)
 894		if (stat & (1 << i))
 895			complete(&sdcp->completion[i]);
 896
 897	return IRQ_HANDLED;
 898}
 899
 900static int mxs_dcp_probe(struct platform_device *pdev)
 901{
 902	struct device *dev = &pdev->dev;
 903	struct dcp *sdcp = NULL;
 904	int i, ret;
 905
 906	struct resource *iores;
 907	int dcp_vmi_irq, dcp_irq;
 908
 909	if (global_sdcp) {
 910		dev_err(dev, "Only one DCP instance allowed!\n");
 911		return -ENODEV;
 912	}
 913
 914	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 915	dcp_vmi_irq = platform_get_irq(pdev, 0);
 916	if (dcp_vmi_irq < 0)
 917		return dcp_vmi_irq;
 918
 919	dcp_irq = platform_get_irq(pdev, 1);
 920	if (dcp_irq < 0)
 921		return dcp_irq;
 922
 923	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
 924	if (!sdcp)
 925		return -ENOMEM;
 926
 927	sdcp->dev = dev;
 928	sdcp->base = devm_ioremap_resource(dev, iores);
 929	if (IS_ERR(sdcp->base))
 930		return PTR_ERR(sdcp->base);
 931
 932
 933	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
 934			       "dcp-vmi-irq", sdcp);
 935	if (ret) {
 936		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
 937		return ret;
 938	}
 939
 940	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
 941			       "dcp-irq", sdcp);
 942	if (ret) {
 943		dev_err(dev, "Failed to claim DCP IRQ!\n");
 944		return ret;
 945	}
 946
 947	/* Allocate coherent helper block. */
 948	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
 949				   GFP_KERNEL);
 950	if (!sdcp->coh)
 951		return -ENOMEM;
 952
 953	/* Re-align the structure so it fits the DCP constraints. */
 954	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
 955
 
 
 
 
 
 956	/* Restart the DCP block. */
 957	ret = stmp_reset_block(sdcp->base);
 958	if (ret)
 
 959		return ret;
 
 960
 961	/* Initialize control register. */
 962	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
 963	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
 964	       sdcp->base + MXS_DCP_CTRL);
 965
 966	/* Enable all DCP DMA channels. */
 967	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
 968	       sdcp->base + MXS_DCP_CHANNELCTRL);
 969
 970	/*
 971	 * We do not enable context switching. Give the context buffer a
 972	 * pointer to an illegal address so if context switching is
 973	 * inadvertantly enabled, the DCP will return an error instead of
 974	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
 975	 * address will do.
 976	 */
 977	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
 978	for (i = 0; i < DCP_MAX_CHANS; i++)
 979		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
 980	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
 981
 982	global_sdcp = sdcp;
 983
 984	platform_set_drvdata(pdev, sdcp);
 985
 986	for (i = 0; i < DCP_MAX_CHANS; i++) {
 987		mutex_init(&sdcp->mutex[i]);
 988		init_completion(&sdcp->completion[i]);
 989		crypto_init_queue(&sdcp->queue[i], 50);
 990	}
 991
 992	/* Create the SHA and AES handler threads. */
 993	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
 994						      NULL, "mxs_dcp_chan/sha");
 995	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
 996		dev_err(dev, "Error starting SHA thread!\n");
 997		return PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
 
 998	}
 999
1000	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
1001						    NULL, "mxs_dcp_chan/aes");
1002	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
1003		dev_err(dev, "Error starting SHA thread!\n");
1004		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
1005		goto err_destroy_sha_thread;
1006	}
1007
1008	/* Register the various crypto algorithms. */
1009	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
1010
1011	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
1012		ret = crypto_register_algs(dcp_aes_algs,
1013					   ARRAY_SIZE(dcp_aes_algs));
1014		if (ret) {
1015			/* Failed to register algorithm. */
1016			dev_err(dev, "Failed to register AES crypto!\n");
1017			goto err_destroy_aes_thread;
1018		}
1019	}
1020
1021	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
1022		ret = crypto_register_ahash(&dcp_sha1_alg);
1023		if (ret) {
1024			dev_err(dev, "Failed to register %s hash!\n",
1025				dcp_sha1_alg.halg.base.cra_name);
1026			goto err_unregister_aes;
1027		}
1028	}
1029
1030	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
1031		ret = crypto_register_ahash(&dcp_sha256_alg);
1032		if (ret) {
1033			dev_err(dev, "Failed to register %s hash!\n",
1034				dcp_sha256_alg.halg.base.cra_name);
1035			goto err_unregister_sha1;
1036		}
1037	}
1038
1039	return 0;
1040
1041err_unregister_sha1:
1042	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1043		crypto_unregister_ahash(&dcp_sha1_alg);
1044
1045err_unregister_aes:
1046	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1047		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1048
1049err_destroy_aes_thread:
1050	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1051
1052err_destroy_sha_thread:
1053	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
 
1054	return ret;
1055}
1056
1057static int mxs_dcp_remove(struct platform_device *pdev)
1058{
1059	struct dcp *sdcp = platform_get_drvdata(pdev);
1060
1061	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
1062		crypto_unregister_ahash(&dcp_sha256_alg);
1063
1064	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1065		crypto_unregister_ahash(&dcp_sha1_alg);
1066
1067	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1068		crypto_unregister_algs(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1069
1070	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1071	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1072
1073	platform_set_drvdata(pdev, NULL);
1074
1075	global_sdcp = NULL;
1076
1077	return 0;
1078}
1079
1080static const struct of_device_id mxs_dcp_dt_ids[] = {
1081	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
1082	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
1083	{ /* sentinel */ }
1084};
1085
1086MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
1087
1088static struct platform_driver mxs_dcp_driver = {
1089	.probe	= mxs_dcp_probe,
1090	.remove	= mxs_dcp_remove,
1091	.driver	= {
1092		.name		= "mxs-dcp",
1093		.of_match_table	= mxs_dcp_dt_ids,
1094	},
1095};
1096
1097module_platform_driver(mxs_dcp_driver);
1098
1099MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
1100MODULE_DESCRIPTION("Freescale MXS DCP Driver");
1101MODULE_LICENSE("GPL");
1102MODULE_ALIAS("platform:mxs-dcp");