drivers/crypto/mxs-dcp.c (Linux v6.8)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Freescale i.MX23/i.MX28 Data Co-Processor driver
   4 *
   5 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
   6 */
   7
   8#include <linux/dma-mapping.h>
   9#include <linux/interrupt.h>
  10#include <linux/io.h>
  11#include <linux/kernel.h>
  12#include <linux/kthread.h>
  13#include <linux/module.h>
  14#include <linux/of.h>
  15#include <linux/platform_device.h>
  16#include <linux/stmp_device.h>
  17#include <linux/clk.h>
  18
  19#include <crypto/aes.h>
  20#include <crypto/sha1.h>
  21#include <crypto/sha2.h>
  22#include <crypto/internal/hash.h>
  23#include <crypto/internal/skcipher.h>
  24#include <crypto/scatterwalk.h>
  25
  26#define DCP_MAX_CHANS	4
  27#define DCP_BUF_SZ	PAGE_SIZE
  28#define DCP_SHA_PAY_SZ  64
  29
  30#define DCP_ALIGNMENT	64
  31
  32/*
   33 * Null hashes to align with the hardware behavior on i.MX6SL and i.MX6ULL;
   34 * these are flipped for consistency with the hardware output.
  35 */
  36static const uint8_t sha1_null_hash[] =
  37	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
  38	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
  39
  40static const uint8_t sha256_null_hash[] =
  41	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
  42	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
  43	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
  44	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
  45
  46/* DCP DMA descriptor. */
  47struct dcp_dma_desc {
  48	uint32_t	next_cmd_addr;
  49	uint32_t	control0;
  50	uint32_t	control1;
  51	uint32_t	source;
  52	uint32_t	destination;
  53	uint32_t	size;
  54	uint32_t	payload;
  55	uint32_t	status;
  56};
  57
  58/* Coherent aligned block for bounce buffering. */
  59struct dcp_coherent_block {
  60	uint8_t			aes_in_buf[DCP_BUF_SZ];
  61	uint8_t			aes_out_buf[DCP_BUF_SZ];
  62	uint8_t			sha_in_buf[DCP_BUF_SZ];
  63	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];
  64
  65	uint8_t			aes_key[2 * AES_KEYSIZE_128];
  66
  67	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
  68};
  69
  70struct dcp {
  71	struct device			*dev;
  72	void __iomem			*base;
  73
  74	uint32_t			caps;
  75
  76	struct dcp_coherent_block	*coh;
  77
  78	struct completion		completion[DCP_MAX_CHANS];
  79	spinlock_t			lock[DCP_MAX_CHANS];
  80	struct task_struct		*thread[DCP_MAX_CHANS];
  81	struct crypto_queue		queue[DCP_MAX_CHANS];
  82	struct clk			*dcp_clk;
  83};
  84
  85enum dcp_chan {
  86	DCP_CHAN_HASH_SHA	= 0,
  87	DCP_CHAN_CRYPTO		= 2,
  88};
  89
  90struct dcp_async_ctx {
  91	/* Common context */
  92	enum dcp_chan	chan;
  93	uint32_t	fill;
  94
  95	/* SHA Hash-specific context */
  96	struct mutex			mutex;
  97	uint32_t			alg;
  98	unsigned int			hot:1;
  99
 100	/* Crypto-specific context */
 101	struct crypto_skcipher		*fallback;
 102	unsigned int			key_len;
 103	uint8_t				key[AES_KEYSIZE_128];
 104};
 105
 106struct dcp_aes_req_ctx {
 107	unsigned int	enc:1;
 108	unsigned int	ecb:1;
 109	struct skcipher_request fallback_req;	// keep at the end
 110};
 111
 112struct dcp_sha_req_ctx {
 113	unsigned int	init:1;
 114	unsigned int	fini:1;
 115};
 116
 117struct dcp_export_state {
 118	struct dcp_sha_req_ctx req_ctx;
 119	struct dcp_async_ctx async_ctx;
 120};
 121
 122/*
  123 * Only one instance of the MXS DCP can exist, due to the design of
  124 * the Linux Crypto API.
 125 */
 126static struct dcp *global_sdcp;
 127
 128/* DCP register layout. */
 129#define MXS_DCP_CTRL				0x00
 130#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
 131#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)
 132
 133#define MXS_DCP_STAT				0x10
 134#define MXS_DCP_STAT_CLR			0x18
 135#define MXS_DCP_STAT_IRQ_MASK			0xf
 136
 137#define MXS_DCP_CHANNELCTRL			0x20
 138#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff
 139
 140#define MXS_DCP_CAPABILITY1			0x40
 141#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
 142#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
 143#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)
 144
 145#define MXS_DCP_CONTEXT				0x50
 146
 147#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
 148
 149#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
 150
 151#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
 152#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))
 153
 154/* DMA descriptor bits. */
 155#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
 156#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
 157#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
 158#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
 159#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
 160#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
 161#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
 162#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
 163#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)
 164
 165#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
 166#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
 167#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
 168#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
 169#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
 170
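/*
 * Kick one descriptor on a DCP channel: clear the channel status, point
 * CH_N_CMDPTR at the DMA-mapped descriptor, bump the channel semaphore to
 * start the transfer, then wait for the IRQ handler to signal completion
 * (with a one second timeout).
 */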
 171static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 172{
 173	int dma_err;
 174	struct dcp *sdcp = global_sdcp;
 175	const int chan = actx->chan;
 176	uint32_t stat;
 177	unsigned long ret;
 178	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 179	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
 180					      DMA_TO_DEVICE);
 181
 182	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
 183	if (dma_err)
 184		return dma_err;
 185
 186	reinit_completion(&sdcp->completion[chan]);
 187
 188	/* Clear status register. */
 189	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
 190
 191	/* Load the DMA descriptor. */
 192	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
 193
 194	/* Increment the semaphore to start the DMA transfer. */
 195	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
 196
 197	ret = wait_for_completion_timeout(&sdcp->completion[chan],
 198					  msecs_to_jiffies(1000));
 199	if (!ret) {
 200		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
 201			chan, readl(sdcp->base + MXS_DCP_STAT));
 202		return -ETIMEDOUT;
 203	}
 204
 205	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
 206	if (stat & 0xff) {
 207		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
 208			chan, stat);
 209		return -EINVAL;
 210	}
 211
 212	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
 213
 214	return 0;
 215}
 216
 217/*
 218 * Encryption (AES128)
 219 */
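/*
 * Build and run a single cipher descriptor over aes_in_buf/aes_out_buf.
 * The 128-bit key and, for CBC, the IV have already been placed in the
 * coherent aes_key buffer, which is handed to the engine as the payload
 * (MXS_DCP_CONTROL0_PAYLOAD_KEY).
 */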
 220static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
 221			   struct skcipher_request *req, int init)
 222{
 223	dma_addr_t key_phys, src_phys, dst_phys;
 224	struct dcp *sdcp = global_sdcp;
 225	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 226	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 227	int ret;
 228
 229	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
 230				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
 231	ret = dma_mapping_error(sdcp->dev, key_phys);
 232	if (ret)
 233		return ret;
 234
 235	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
 236				  DCP_BUF_SZ, DMA_TO_DEVICE);
 237	ret = dma_mapping_error(sdcp->dev, src_phys);
 238	if (ret)
 239		goto err_src;
 240
 241	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
 242				  DCP_BUF_SZ, DMA_FROM_DEVICE);
 243	ret = dma_mapping_error(sdcp->dev, dst_phys);
 244	if (ret)
 245		goto err_dst;
 246
 247	if (actx->fill % AES_BLOCK_SIZE) {
 248		dev_err(sdcp->dev, "Invalid block size!\n");
 249		ret = -EINVAL;
 250		goto aes_done_run;
 251	}
 252
 253	/* Fill in the DMA descriptor. */
 254	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 255		    MXS_DCP_CONTROL0_INTERRUPT |
 256		    MXS_DCP_CONTROL0_ENABLE_CIPHER;
 257
 258	/* Payload contains the key. */
 259	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
 260
 261	if (rctx->enc)
 262		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
 263	if (init)
 264		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
 265
 266	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
 267
 268	if (rctx->ecb)
 269		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
 270	else
 271		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 272
 273	desc->next_cmd_addr = 0;
 274	desc->source = src_phys;
 275	desc->destination = dst_phys;
 276	desc->size = actx->fill;
 277	desc->payload = key_phys;
 278	desc->status = 0;
 279
 280	ret = mxs_dcp_start_dma(actx);
 281
 282aes_done_run:
 283	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
 284err_dst:
 285	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 286err_src:
 287	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
 288			 DMA_TO_DEVICE);
 289
 290	return ret;
 291}
 292
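/*
 * Bounce-buffer one skcipher request: copy the source scatterlist into
 * aes_in_buf in DCP_BUF_SZ chunks, run each chunk through the engine and
 * copy the result from aes_out_buf back into the destination scatterlist.
 * For CBC, the last ciphertext block is written back to req->iv so chained
 * requests continue with the correct IV.
 */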
 293static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 294{
 295	struct dcp *sdcp = global_sdcp;
 296
 297	struct skcipher_request *req = skcipher_request_cast(arq);
 298	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 299	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 300
 301	struct scatterlist *dst = req->dst;
 302	struct scatterlist *src = req->src;
 303	int dst_nents = sg_nents(dst);
 304
 305	const int out_off = DCP_BUF_SZ;
 306	uint8_t *in_buf = sdcp->coh->aes_in_buf;
 307	uint8_t *out_buf = sdcp->coh->aes_out_buf;
 308
 309	uint32_t dst_off = 0;
 310	uint8_t *src_buf = NULL;
 311	uint32_t last_out_len = 0;
 312
 313	uint8_t *key = sdcp->coh->aes_key;
 314
 315	int ret = 0;
 316	unsigned int i, len, clen, tlen = 0;
 317	int init = 0;
 318	bool limit_hit = false;
 319
 320	actx->fill = 0;
 321
 322	/* Copy the key from the temporary location. */
 323	memcpy(key, actx->key, actx->key_len);
 324
 325	if (!rctx->ecb) {
 326		/* Copy the CBC IV just past the key. */
 327		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
 328		/* CBC needs the INIT set. */
 329		init = 1;
 330	} else {
 331		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
 332	}
 333
 334	for_each_sg(req->src, src, sg_nents(req->src), i) {
 335		src_buf = sg_virt(src);
 336		len = sg_dma_len(src);
 337		tlen += len;
 338		limit_hit = tlen > req->cryptlen;
 339
 340		if (limit_hit)
 341			len = req->cryptlen - (tlen - len);
 342
 343		do {
 344			if (actx->fill + len > out_off)
 345				clen = out_off - actx->fill;
 346			else
 347				clen = len;
 348
 349			memcpy(in_buf + actx->fill, src_buf, clen);
 350			len -= clen;
 351			src_buf += clen;
 352			actx->fill += clen;
 353
 354			/*
 355			 * If we filled the buffer or this is the last SG,
 356			 * submit the buffer.
 357			 */
 358			if (actx->fill == out_off || sg_is_last(src) ||
 359			    limit_hit) {
 360				ret = mxs_dcp_run_aes(actx, req, init);
 361				if (ret)
 362					return ret;
 363				init = 0;
 364
 365				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
 366						     actx->fill, dst_off);
 367				dst_off += actx->fill;
 368				last_out_len = actx->fill;
 369				actx->fill = 0;
 370			}
 371		} while (len);
 372
 373		if (limit_hit)
 374			break;
 375	}
 376
  377	/* Copy the CBC IV back into the request for chaining */
 378	if (!rctx->ecb) {
 379		if (rctx->enc)
 380			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
 381				AES_BLOCK_SIZE);
 382		else
 383			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
 384				AES_BLOCK_SIZE);
 385	}
 386
 387	return ret;
 388}
 389
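/*
 * AES channel worker. mxs_dcp_aes_enqueue() queues requests and wakes this
 * kthread, which dequeues them, runs them through the bounce buffers and
 * reports the result via crypto_request_complete().
 */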
 390static int dcp_chan_thread_aes(void *data)
 391{
 392	struct dcp *sdcp = global_sdcp;
 393	const int chan = DCP_CHAN_CRYPTO;
 394
 395	struct crypto_async_request *backlog;
 396	struct crypto_async_request *arq;
 397
 398	int ret;
 399
 400	while (!kthread_should_stop()) {
 401		set_current_state(TASK_INTERRUPTIBLE);
 402
 403		spin_lock(&sdcp->lock[chan]);
 404		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 405		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 406		spin_unlock(&sdcp->lock[chan]);
 407
 408		if (!backlog && !arq) {
 409			schedule();
 410			continue;
 411		}
 412
 413		set_current_state(TASK_RUNNING);
 414
 415		if (backlog)
 416			crypto_request_complete(backlog, -EINPROGRESS);
 417
 418		if (arq) {
 419			ret = mxs_dcp_aes_block_crypt(arq);
 420			crypto_request_complete(arq, ret);
 421		}
 422	}
 423
 424	return 0;
 425}
 426
 427static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
 428{
 429	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 430	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 431	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
 432	int ret;
 433
 434	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 435	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
 436				      req->base.complete, req->base.data);
 437	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
 438				   req->cryptlen, req->iv);
 439
 440	if (enc)
 441		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
 442	else
 443		ret = crypto_skcipher_decrypt(&rctx->fallback_req);
 444
 445	return ret;
 446}
 447
 448static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
 449{
 450	struct dcp *sdcp = global_sdcp;
 451	struct crypto_async_request *arq = &req->base;
 452	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 453	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 454	int ret;
 455
 456	if (unlikely(actx->key_len != AES_KEYSIZE_128))
 457		return mxs_dcp_block_fallback(req, enc);
 458
 459	rctx->enc = enc;
 460	rctx->ecb = ecb;
 461	actx->chan = DCP_CHAN_CRYPTO;
 462
 463	spin_lock(&sdcp->lock[actx->chan]);
 464	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 465	spin_unlock(&sdcp->lock[actx->chan]);
 466
 467	wake_up_process(sdcp->thread[actx->chan]);
 468
 469	return ret;
 470}
 471
 472static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
 473{
 474	return mxs_dcp_aes_enqueue(req, 0, 1);
 475}
 476
 477static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
 478{
 479	return mxs_dcp_aes_enqueue(req, 1, 1);
 480}
 481
 482static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
 483{
 484	return mxs_dcp_aes_enqueue(req, 0, 0);
 485}
 486
 487static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
 488{
 489	return mxs_dcp_aes_enqueue(req, 1, 0);
 490}
 491
 492static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 493			      unsigned int len)
 494{
 495	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 496
 497	/*
  498	 * AES 128 is supported by the hardware, store the key into the temporary
 499	 * buffer and exit. We must use the temporary buffer here, since
 500	 * there can still be an operation in progress.
 501	 */
 502	actx->key_len = len;
 503	if (len == AES_KEYSIZE_128) {
 504		memcpy(actx->key, key, len);
 505		return 0;
 506	}
 507
 508	/*
 509	 * If the requested AES key size is not supported by the hardware,
 510	 * but is supported by in-kernel software implementation, we use
 511	 * software fallback.
 512	 */
 513	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
 514	crypto_skcipher_set_flags(actx->fallback,
 515				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 516	return crypto_skcipher_setkey(actx->fallback, key, len);
 517}
 518
 519static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
 520{
 521	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
 522	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 523	struct crypto_skcipher *blk;
 524
 525	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 526	if (IS_ERR(blk))
 527		return PTR_ERR(blk);
 528
 529	actx->fallback = blk;
 530	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
 531					 crypto_skcipher_reqsize(blk));
 532	return 0;
 533}
 534
 535static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
 536{
 537	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 538
 539	crypto_free_skcipher(actx->fallback);
 540}
 541
 542/*
 543 * Hashing (SHA1/SHA256)
 544 */
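/*
 * Run one hashing descriptor over sha_in_buf. The first descriptor of a
 * stream carries HASH_INIT, the last one carries HASH_TERM and points the
 * payload at sha_out_buf, where the hardware writes the digest in reverse
 * byte order (dcp_sha_req_to_buf() flips it back).
 */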
 545static int mxs_dcp_run_sha(struct ahash_request *req)
 546{
 547	struct dcp *sdcp = global_sdcp;
 548	int ret;
 549
 550	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 551	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 552	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 553	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 554
 555	dma_addr_t digest_phys = 0;
 556	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
 557					     DCP_BUF_SZ, DMA_TO_DEVICE);
 558
 559	ret = dma_mapping_error(sdcp->dev, buf_phys);
 560	if (ret)
 561		return ret;
 562
 563	/* Fill in the DMA descriptor. */
 564	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 565		    MXS_DCP_CONTROL0_INTERRUPT |
 566		    MXS_DCP_CONTROL0_ENABLE_HASH;
 567	if (rctx->init)
 568		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
 569
 570	desc->control1 = actx->alg;
 571	desc->next_cmd_addr = 0;
 572	desc->source = buf_phys;
 573	desc->destination = 0;
 574	desc->size = actx->fill;
 575	desc->payload = 0;
 576	desc->status = 0;
 577
 578	/*
 579	 * Align driver with hw behavior when generating null hashes
 580	 */
 581	if (rctx->init && rctx->fini && desc->size == 0) {
 582		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 583		const uint8_t *sha_buf =
 584			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
 585			sha1_null_hash : sha256_null_hash;
 586		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
 587		ret = 0;
 588		goto done_run;
 589	}
 590
 591	/* Set HASH_TERM bit for last transfer block. */
 592	if (rctx->fini) {
 593		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
 594					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
 595		ret = dma_mapping_error(sdcp->dev, digest_phys);
 596		if (ret)
 597			goto done_run;
 598
 599		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
 600		desc->payload = digest_phys;
 601	}
 602
 603	ret = mxs_dcp_start_dma(actx);
 604
 605	if (rctx->fini)
 606		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
 607				 DMA_FROM_DEVICE);
 608
 609done_run:
 610	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 611
 612	return ret;
 613}
 614
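/*
 * Feed the request's data into sha_in_buf in DCP_BUF_SZ chunks and run the
 * engine on every full buffer; on the trailing request, hash whatever is
 * left and copy the digest from sha_out_buf into req->result.
 */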
 615static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 616{
 617	struct dcp *sdcp = global_sdcp;
 618
 619	struct ahash_request *req = ahash_request_cast(arq);
 620	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 621	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 622	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 623	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 624
 625	uint8_t *in_buf = sdcp->coh->sha_in_buf;
 626	uint8_t *out_buf = sdcp->coh->sha_out_buf;
 627
 628	struct scatterlist *src;
 629
 630	unsigned int i, len, clen, oft = 0;
 631	int ret;
 632
 633	int fin = rctx->fini;
 634	if (fin)
 635		rctx->fini = 0;
 636
 637	src = req->src;
 638	len = req->nbytes;
 639
 640	while (len) {
 641		if (actx->fill + len > DCP_BUF_SZ)
 642			clen = DCP_BUF_SZ - actx->fill;
 643		else
 644			clen = len;
 645
 646		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
 647					 0);
 648
 649		len -= clen;
 650		oft += clen;
 651		actx->fill += clen;
 652
 653		/*
 654		 * If we filled the buffer and still have some
 655		 * more data, submit the buffer.
 656		 */
 657		if (len && actx->fill == DCP_BUF_SZ) {
 658			ret = mxs_dcp_run_sha(req);
 659			if (ret)
 660				return ret;
 661			actx->fill = 0;
 662			rctx->init = 0;
 663		}
 664	}
 665
 666	if (fin) {
 667		rctx->fini = 1;
 668
 669		/* Submit whatever is left. */
 670		if (!req->result)
 671			return -EINVAL;
 672
 673		ret = mxs_dcp_run_sha(req);
 674		if (ret)
 675			return ret;
 676
 677		actx->fill = 0;
 678
  679		/* The hardware produces the digest byte-reversed, so flip it here. */
 680		for (i = 0; i < halg->digestsize; i++)
 681			req->result[i] = out_buf[halg->digestsize - i - 1];
 682	}
 683
 684	return 0;
 685}
 686
 687static int dcp_chan_thread_sha(void *data)
 688{
 689	struct dcp *sdcp = global_sdcp;
 690	const int chan = DCP_CHAN_HASH_SHA;
 691
 692	struct crypto_async_request *backlog;
 693	struct crypto_async_request *arq;
 694	int ret;
 695
 696	while (!kthread_should_stop()) {
 697		set_current_state(TASK_INTERRUPTIBLE);
 698
 699		spin_lock(&sdcp->lock[chan]);
 700		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 701		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 702		spin_unlock(&sdcp->lock[chan]);
 703
 704		if (!backlog && !arq) {
 705			schedule();
 706			continue;
 707		}
 708
 709		set_current_state(TASK_RUNNING);
 710
 711		if (backlog)
 712			crypto_request_complete(backlog, -EINPROGRESS);
 713
 714		if (arq) {
 715			ret = dcp_sha_req_to_buf(arq);
 716			crypto_request_complete(arq, ret);
 717		}
 718	}
 719
 720	return 0;
 721}
 722
 723static int dcp_sha_init(struct ahash_request *req)
 724{
 725	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 726	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 727
 728	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 729
 730	/*
 731	 * Start hashing session. The code below only inits the
 732	 * hashing session context, nothing more.
 733	 */
 734	memset(actx, 0, sizeof(*actx));
 735
 736	if (strcmp(halg->base.cra_name, "sha1") == 0)
 737		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
 738	else
 739		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
 740
 741	actx->fill = 0;
 742	actx->hot = 0;
 743	actx->chan = DCP_CHAN_HASH_SHA;
 744
 745	mutex_init(&actx->mutex);
 746
 747	return 0;
 748}
 749
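/*
 * Common path for update/final/finup/digest. The first call after
 * dcp_sha_init() sets rctx->init so the next descriptor starts a new hash;
 * "fini" marks the trailing request of the stream. The request is then
 * queued for the SHA channel kthread.
 */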
 750static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 751{
 752	struct dcp *sdcp = global_sdcp;
 753
 754	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 755	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 756	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 757
 758	int ret;
 759
 760	/*
 761	 * Ignore requests that have no data in them and are not
 762	 * the trailing requests in the stream of requests.
 763	 */
 764	if (!req->nbytes && !fini)
 765		return 0;
 766
 767	mutex_lock(&actx->mutex);
 768
 769	rctx->fini = fini;
 770
 771	if (!actx->hot) {
 772		actx->hot = 1;
 773		rctx->init = 1;
 774	}
 775
 776	spin_lock(&sdcp->lock[actx->chan]);
 777	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 778	spin_unlock(&sdcp->lock[actx->chan]);
 779
 780	wake_up_process(sdcp->thread[actx->chan]);
 781	mutex_unlock(&actx->mutex);
 782
 783	return ret;
 784}
 785
 786static int dcp_sha_update(struct ahash_request *req)
 787{
 788	return dcp_sha_update_fx(req, 0);
 789}
 790
 791static int dcp_sha_final(struct ahash_request *req)
 792{
 793	ahash_request_set_crypt(req, NULL, req->result, 0);
 794	req->nbytes = 0;
 795	return dcp_sha_update_fx(req, 1);
 796}
 797
 798static int dcp_sha_finup(struct ahash_request *req)
 799{
 800	return dcp_sha_update_fx(req, 1);
 801}
 802
 803static int dcp_sha_digest(struct ahash_request *req)
 804{
 805	int ret;
 806
 807	ret = dcp_sha_init(req);
 808	if (ret)
 809		return ret;
 810
 811	return dcp_sha_finup(req);
 812}
 813
 814static int dcp_sha_import(struct ahash_request *req, const void *in)
 815{
 816	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 817	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 818	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 819	const struct dcp_export_state *export = in;
 820
 821	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
 822	memset(actx, 0, sizeof(struct dcp_async_ctx));
 823	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
 824	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
 825
 826	return 0;
 827}
 828
 829static int dcp_sha_export(struct ahash_request *req, void *out)
 830{
 831	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
 832	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 833	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
 834	struct dcp_export_state *export = out;
 835
 836	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
 837	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
 838
 839	return 0;
 840}
 841
 842static int dcp_sha_cra_init(struct crypto_tfm *tfm)
 843{
 844	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 845				 sizeof(struct dcp_sha_req_ctx));
 846	return 0;
 847}
 848
 849static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
 850{
 851}
 852
 853/* AES 128 ECB and AES 128 CBC */
 854static struct skcipher_alg dcp_aes_algs[] = {
 855	{
 856		.base.cra_name		= "ecb(aes)",
 857		.base.cra_driver_name	= "ecb-aes-dcp",
 858		.base.cra_priority	= 400,
 859		.base.cra_alignmask	= 15,
 860		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 861					  CRYPTO_ALG_NEED_FALLBACK,
 862		.base.cra_blocksize	= AES_BLOCK_SIZE,
 863		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
 864		.base.cra_module	= THIS_MODULE,
 865
 866		.min_keysize		= AES_MIN_KEY_SIZE,
 867		.max_keysize		= AES_MAX_KEY_SIZE,
 868		.setkey			= mxs_dcp_aes_setkey,
 869		.encrypt		= mxs_dcp_aes_ecb_encrypt,
 870		.decrypt		= mxs_dcp_aes_ecb_decrypt,
 871		.init			= mxs_dcp_aes_fallback_init_tfm,
 872		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 873	}, {
 874		.base.cra_name		= "cbc(aes)",
 875		.base.cra_driver_name	= "cbc-aes-dcp",
 876		.base.cra_priority	= 400,
 877		.base.cra_alignmask	= 15,
 878		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 879					  CRYPTO_ALG_NEED_FALLBACK,
 880		.base.cra_blocksize	= AES_BLOCK_SIZE,
 881		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
 882		.base.cra_module	= THIS_MODULE,
 883
 884		.min_keysize		= AES_MIN_KEY_SIZE,
 885		.max_keysize		= AES_MAX_KEY_SIZE,
 886		.setkey			= mxs_dcp_aes_setkey,
 887		.encrypt		= mxs_dcp_aes_cbc_encrypt,
 888		.decrypt		= mxs_dcp_aes_cbc_decrypt,
 889		.ivsize			= AES_BLOCK_SIZE,
 890		.init			= mxs_dcp_aes_fallback_init_tfm,
 891		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 892	},
 893};
 894
 895/* SHA1 */
 896static struct ahash_alg dcp_sha1_alg = {
 897	.init	= dcp_sha_init,
 898	.update	= dcp_sha_update,
 899	.final	= dcp_sha_final,
 900	.finup	= dcp_sha_finup,
 901	.digest	= dcp_sha_digest,
 902	.import = dcp_sha_import,
 903	.export = dcp_sha_export,
 904	.halg	= {
 905		.digestsize	= SHA1_DIGEST_SIZE,
 906		.statesize	= sizeof(struct dcp_export_state),
 907		.base		= {
 908			.cra_name		= "sha1",
 909			.cra_driver_name	= "sha1-dcp",
 910			.cra_priority		= 400,
 911			.cra_flags		= CRYPTO_ALG_ASYNC,
 912			.cra_blocksize		= SHA1_BLOCK_SIZE,
 913			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 914			.cra_module		= THIS_MODULE,
 915			.cra_init		= dcp_sha_cra_init,
 916			.cra_exit		= dcp_sha_cra_exit,
 917		},
 918	},
 919};
 920
 921/* SHA256 */
 922static struct ahash_alg dcp_sha256_alg = {
 923	.init	= dcp_sha_init,
 924	.update	= dcp_sha_update,
 925	.final	= dcp_sha_final,
 926	.finup	= dcp_sha_finup,
 927	.digest	= dcp_sha_digest,
 928	.import = dcp_sha_import,
 929	.export = dcp_sha_export,
 930	.halg	= {
 931		.digestsize	= SHA256_DIGEST_SIZE,
 932		.statesize	= sizeof(struct dcp_export_state),
 933		.base		= {
 934			.cra_name		= "sha256",
 935			.cra_driver_name	= "sha256-dcp",
 936			.cra_priority		= 400,
 937			.cra_flags		= CRYPTO_ALG_ASYNC,
 938			.cra_blocksize		= SHA256_BLOCK_SIZE,
 939			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 940			.cra_module		= THIS_MODULE,
 941			.cra_init		= dcp_sha_cra_init,
 942			.cra_exit		= dcp_sha_cra_exit,
 943		},
 944	},
 945};
 946
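/*
 * Shared handler for both DCP interrupt lines: acknowledge the per-channel
 * status bits and complete the matching channel completions so the worker
 * waiting in mxs_dcp_start_dma() can proceed.
 */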
 947static irqreturn_t mxs_dcp_irq(int irq, void *context)
 948{
 949	struct dcp *sdcp = context;
 950	uint32_t stat;
 951	int i;
 952
 953	stat = readl(sdcp->base + MXS_DCP_STAT);
 954	stat &= MXS_DCP_STAT_IRQ_MASK;
 955	if (!stat)
 956		return IRQ_NONE;
 957
 958	/* Clear the interrupts. */
 959	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
 960
 961	/* Complete the DMA requests that finished. */
 962	for (i = 0; i < DCP_MAX_CHANS; i++)
 963		if (stat & (1 << i))
 964			complete(&sdcp->completion[i]);
 965
 966	return IRQ_HANDLED;
 967}
 968
 969static int mxs_dcp_probe(struct platform_device *pdev)
 970{
 971	struct device *dev = &pdev->dev;
 972	struct dcp *sdcp = NULL;
 973	int i, ret;
 974	int dcp_vmi_irq, dcp_irq;
 975
 976	if (global_sdcp) {
 977		dev_err(dev, "Only one DCP instance allowed!\n");
 978		return -ENODEV;
 979	}
 980
 981	dcp_vmi_irq = platform_get_irq(pdev, 0);
 982	if (dcp_vmi_irq < 0)
 983		return dcp_vmi_irq;
 984
 985	dcp_irq = platform_get_irq(pdev, 1);
 986	if (dcp_irq < 0)
 987		return dcp_irq;
 988
 989	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
 990	if (!sdcp)
 991		return -ENOMEM;
 992
 993	sdcp->dev = dev;
 994	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
 995	if (IS_ERR(sdcp->base))
 996		return PTR_ERR(sdcp->base);
 997
 998
 999	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
1000			       "dcp-vmi-irq", sdcp);
1001	if (ret) {
1002		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
1003		return ret;
1004	}
1005
1006	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
1007			       "dcp-irq", sdcp);
1008	if (ret) {
1009		dev_err(dev, "Failed to claim DCP IRQ!\n");
1010		return ret;
1011	}
1012
1013	/* Allocate coherent helper block. */
1014	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
1015				   GFP_KERNEL);
1016	if (!sdcp->coh)
1017		return -ENOMEM;
1018
1019	/* Re-align the structure so it fits the DCP constraints. */
1020	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
1021
1022	/* DCP clock is optional, only used on some SOCs */
1023	sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
1024	if (IS_ERR(sdcp->dcp_clk))
1025		return PTR_ERR(sdcp->dcp_clk);
1026
1027	/* Restart the DCP block. */
1028	ret = stmp_reset_block(sdcp->base);
1029	if (ret) {
1030		dev_err(dev, "Failed reset\n");
1031		return ret;
1032	}
1033
1034	/* Initialize control register. */
1035	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
1036	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
1037	       sdcp->base + MXS_DCP_CTRL);
1038
1039	/* Enable all DCP DMA channels. */
1040	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
1041	       sdcp->base + MXS_DCP_CHANNELCTRL);
1042
1043	/*
1044	 * We do not enable context switching. Give the context buffer a
1045	 * pointer to an illegal address so if context switching is
 1046	 * inadvertently enabled, the DCP will return an error instead of
1047	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
1048	 * address will do.
1049	 */
1050	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
1051	for (i = 0; i < DCP_MAX_CHANS; i++)
1052		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
1053	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
1054
1055	global_sdcp = sdcp;
1056
1057	platform_set_drvdata(pdev, sdcp);
1058
1059	for (i = 0; i < DCP_MAX_CHANS; i++) {
1060		spin_lock_init(&sdcp->lock[i]);
1061		init_completion(&sdcp->completion[i]);
1062		crypto_init_queue(&sdcp->queue[i], 50);
1063	}
1064
1065	/* Create the SHA and AES handler threads. */
1066	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
1067						      NULL, "mxs_dcp_chan/sha");
1068	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
1069		dev_err(dev, "Error starting SHA thread!\n");
1070		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
1071		return ret;
1072	}
1073
1074	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
1075						    NULL, "mxs_dcp_chan/aes");
1076	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
 1077		dev_err(dev, "Error starting Crypto thread!\n");
1078		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
1079		goto err_destroy_sha_thread;
1080	}
1081
1082	/* Register the various crypto algorithms. */
1083	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
1084
1085	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
1086		ret = crypto_register_skciphers(dcp_aes_algs,
1087						ARRAY_SIZE(dcp_aes_algs));
1088		if (ret) {
1089			/* Failed to register algorithm. */
1090			dev_err(dev, "Failed to register AES crypto!\n");
1091			goto err_destroy_aes_thread;
1092		}
1093	}
1094
1095	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
1096		ret = crypto_register_ahash(&dcp_sha1_alg);
1097		if (ret) {
1098			dev_err(dev, "Failed to register %s hash!\n",
1099				dcp_sha1_alg.halg.base.cra_name);
1100			goto err_unregister_aes;
1101		}
1102	}
1103
1104	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
1105		ret = crypto_register_ahash(&dcp_sha256_alg);
1106		if (ret) {
1107			dev_err(dev, "Failed to register %s hash!\n",
1108				dcp_sha256_alg.halg.base.cra_name);
1109			goto err_unregister_sha1;
1110		}
1111	}
1112
1113	return 0;
1114
1115err_unregister_sha1:
1116	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1117		crypto_unregister_ahash(&dcp_sha1_alg);
1118
1119err_unregister_aes:
1120	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1121		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1122
1123err_destroy_aes_thread:
1124	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1125
1126err_destroy_sha_thread:
1127	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1128
1129	return ret;
1130}
1131
1132static void mxs_dcp_remove(struct platform_device *pdev)
1133{
1134	struct dcp *sdcp = platform_get_drvdata(pdev);
1135
1136	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
1137		crypto_unregister_ahash(&dcp_sha256_alg);
1138
1139	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1140		crypto_unregister_ahash(&dcp_sha1_alg);
1141
1142	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1143		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1144
1145	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1146	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1147
1148	platform_set_drvdata(pdev, NULL);
1149
1150	global_sdcp = NULL;
1151}
1152
1153static const struct of_device_id mxs_dcp_dt_ids[] = {
1154	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
1155	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
1156	{ /* sentinel */ }
1157};
1158
1159MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
1160
1161static struct platform_driver mxs_dcp_driver = {
1162	.probe	= mxs_dcp_probe,
1163	.remove_new = mxs_dcp_remove,
1164	.driver	= {
1165		.name		= "mxs-dcp",
1166		.of_match_table	= mxs_dcp_dt_ids,
1167	},
1168};
1169
1170module_platform_driver(mxs_dcp_driver);
1171
1172MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
1173MODULE_DESCRIPTION("Freescale MXS DCP Driver");
1174MODULE_LICENSE("GPL");
1175MODULE_ALIAS("platform:mxs-dcp");
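
The driver only registers its algorithms with the Crypto API; other code reaches the DCP through the generic skcipher and ahash interfaces. The sketch below (not part of the driver) shows how a kernel module might exercise the "cbc(aes)" implementation registered above; the function name and buffers are hypothetical and error handling is kept minimal.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/*
 * Encrypt "len" bytes of "buf" in place with AES-128-CBC. The buffers must
 * be DMA-able (e.g. from kmalloc()), not on the stack. Hypothetical example.
 */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* Resolves to the highest-priority "cbc(aes)" provider, e.g. cbc-aes-dcp. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* The DCP backend is asynchronous, so wait for the completion. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}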
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Freescale i.MX23/i.MX28 Data Co-Processor driver
   4 *
   5 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
   6 */
   7
   8#include <linux/dma-mapping.h>
   9#include <linux/interrupt.h>
  10#include <linux/io.h>
  11#include <linux/kernel.h>
  12#include <linux/kthread.h>
  13#include <linux/module.h>
  14#include <linux/of.h>
  15#include <linux/platform_device.h>
  16#include <linux/stmp_device.h>
  17#include <linux/clk.h>
  18
  19#include <crypto/aes.h>
  20#include <crypto/sha.h>
 
  21#include <crypto/internal/hash.h>
  22#include <crypto/internal/skcipher.h>
 
  23
  24#define DCP_MAX_CHANS	4
  25#define DCP_BUF_SZ	PAGE_SIZE
  26#define DCP_SHA_PAY_SZ  64
  27
  28#define DCP_ALIGNMENT	64
  29
  30/*
  31 * Null hashes to align with hw behavior on imx6sl and ull
  32 * these are flipped for consistency with hw output
  33 */
  34static const uint8_t sha1_null_hash[] =
  35	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
  36	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
  37
  38static const uint8_t sha256_null_hash[] =
  39	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
  40	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
  41	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
  42	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
  43
  44/* DCP DMA descriptor. */
  45struct dcp_dma_desc {
  46	uint32_t	next_cmd_addr;
  47	uint32_t	control0;
  48	uint32_t	control1;
  49	uint32_t	source;
  50	uint32_t	destination;
  51	uint32_t	size;
  52	uint32_t	payload;
  53	uint32_t	status;
  54};
  55
  56/* Coherent aligned block for bounce buffering. */
  57struct dcp_coherent_block {
  58	uint8_t			aes_in_buf[DCP_BUF_SZ];
  59	uint8_t			aes_out_buf[DCP_BUF_SZ];
  60	uint8_t			sha_in_buf[DCP_BUF_SZ];
  61	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];
  62
  63	uint8_t			aes_key[2 * AES_KEYSIZE_128];
  64
  65	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
  66};
  67
  68struct dcp {
  69	struct device			*dev;
  70	void __iomem			*base;
  71
  72	uint32_t			caps;
  73
  74	struct dcp_coherent_block	*coh;
  75
  76	struct completion		completion[DCP_MAX_CHANS];
  77	spinlock_t			lock[DCP_MAX_CHANS];
  78	struct task_struct		*thread[DCP_MAX_CHANS];
  79	struct crypto_queue		queue[DCP_MAX_CHANS];
  80	struct clk			*dcp_clk;
  81};
  82
  83enum dcp_chan {
  84	DCP_CHAN_HASH_SHA	= 0,
  85	DCP_CHAN_CRYPTO		= 2,
  86};
  87
  88struct dcp_async_ctx {
  89	/* Common context */
  90	enum dcp_chan	chan;
  91	uint32_t	fill;
  92
  93	/* SHA Hash-specific context */
  94	struct mutex			mutex;
  95	uint32_t			alg;
  96	unsigned int			hot:1;
  97
  98	/* Crypto-specific context */
  99	struct crypto_sync_skcipher	*fallback;
 100	unsigned int			key_len;
 101	uint8_t				key[AES_KEYSIZE_128];
 102};
 103
 104struct dcp_aes_req_ctx {
 105	unsigned int	enc:1;
 106	unsigned int	ecb:1;
 
 107};
 108
 109struct dcp_sha_req_ctx {
 110	unsigned int	init:1;
 111	unsigned int	fini:1;
 112};
 113
 114struct dcp_export_state {
 115	struct dcp_sha_req_ctx req_ctx;
 116	struct dcp_async_ctx async_ctx;
 117};
 118
 119/*
 120 * There can even be only one instance of the MXS DCP due to the
 121 * design of Linux Crypto API.
 122 */
 123static struct dcp *global_sdcp;
 124
 125/* DCP register layout. */
 126#define MXS_DCP_CTRL				0x00
 127#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
 128#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)
 129
 130#define MXS_DCP_STAT				0x10
 131#define MXS_DCP_STAT_CLR			0x18
 132#define MXS_DCP_STAT_IRQ_MASK			0xf
 133
 134#define MXS_DCP_CHANNELCTRL			0x20
 135#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff
 136
 137#define MXS_DCP_CAPABILITY1			0x40
 138#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
 139#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
 140#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)
 141
 142#define MXS_DCP_CONTEXT				0x50
 143
 144#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
 145
 146#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
 147
 148#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
 149#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))
 150
 151/* DMA descriptor bits. */
 152#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
 153#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
 154#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
 155#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
 156#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
 157#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
 158#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
 159#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
 160#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)
 161
 162#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
 163#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
 164#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
 165#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
 166#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
 167
 168static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 169{
 
 170	struct dcp *sdcp = global_sdcp;
 171	const int chan = actx->chan;
 172	uint32_t stat;
 173	unsigned long ret;
 174	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 175
 176	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
 177					      DMA_TO_DEVICE);
 178
 
 
 
 
 179	reinit_completion(&sdcp->completion[chan]);
 180
 181	/* Clear status register. */
 182	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
 183
 184	/* Load the DMA descriptor. */
 185	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
 186
 187	/* Increment the semaphore to start the DMA transfer. */
 188	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
 189
 190	ret = wait_for_completion_timeout(&sdcp->completion[chan],
 191					  msecs_to_jiffies(1000));
 192	if (!ret) {
 193		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
 194			chan, readl(sdcp->base + MXS_DCP_STAT));
 195		return -ETIMEDOUT;
 196	}
 197
 198	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
 199	if (stat & 0xff) {
 200		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
 201			chan, stat);
 202		return -EINVAL;
 203	}
 204
 205	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
 206
 207	return 0;
 208}
 209
 210/*
 211 * Encryption (AES128)
 212 */
 213static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
 214			   struct ablkcipher_request *req, int init)
 215{
 
 216	struct dcp *sdcp = global_sdcp;
 217	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 218	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
 219	int ret;
 220
 221	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
 222					     2 * AES_KEYSIZE_128,
 223					     DMA_TO_DEVICE);
 224	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
 225					     DCP_BUF_SZ, DMA_TO_DEVICE);
 226	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
 227					     DCP_BUF_SZ, DMA_FROM_DEVICE);
 
 
 
 
 
 
 
 
 
 
 228
 229	if (actx->fill % AES_BLOCK_SIZE) {
 230		dev_err(sdcp->dev, "Invalid block size!\n");
 231		ret = -EINVAL;
 232		goto aes_done_run;
 233	}
 234
 235	/* Fill in the DMA descriptor. */
 236	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 237		    MXS_DCP_CONTROL0_INTERRUPT |
 238		    MXS_DCP_CONTROL0_ENABLE_CIPHER;
 239
 240	/* Payload contains the key. */
 241	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
 242
 243	if (rctx->enc)
 244		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
 245	if (init)
 246		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
 247
 248	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
 249
 250	if (rctx->ecb)
 251		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
 252	else
 253		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 254
 255	desc->next_cmd_addr = 0;
 256	desc->source = src_phys;
 257	desc->destination = dst_phys;
 258	desc->size = actx->fill;
 259	desc->payload = key_phys;
 260	desc->status = 0;
 261
 262	ret = mxs_dcp_start_dma(actx);
 263
 264aes_done_run:
 
 
 
 
 265	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
 266			 DMA_TO_DEVICE);
 267	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 268	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
 269
 270	return ret;
 271}
 272
 273static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 274{
 275	struct dcp *sdcp = global_sdcp;
 276
 277	struct ablkcipher_request *req = ablkcipher_request_cast(arq);
 278	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 279	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
 280
 281	struct scatterlist *dst = req->dst;
 282	struct scatterlist *src = req->src;
 283	const int nents = sg_nents(req->src);
 284
 285	const int out_off = DCP_BUF_SZ;
 286	uint8_t *in_buf = sdcp->coh->aes_in_buf;
 287	uint8_t *out_buf = sdcp->coh->aes_out_buf;
 288
 289	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
 290	uint32_t dst_off = 0;
 
 291	uint32_t last_out_len = 0;
 292
 293	uint8_t *key = sdcp->coh->aes_key;
 294
 295	int ret = 0;
 296	int split = 0;
 297	unsigned int i, len, clen, rem = 0, tlen = 0;
 298	int init = 0;
 299	bool limit_hit = false;
 300
 301	actx->fill = 0;
 302
 303	/* Copy the key from the temporary location. */
 304	memcpy(key, actx->key, actx->key_len);
 305
 306	if (!rctx->ecb) {
 307		/* Copy the CBC IV just past the key. */
 308		memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128);
 309		/* CBC needs the INIT set. */
 310		init = 1;
 311	} else {
 312		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
 313	}
 314
 315	for_each_sg(req->src, src, nents, i) {
 316		src_buf = sg_virt(src);
 317		len = sg_dma_len(src);
 318		tlen += len;
 319		limit_hit = tlen > req->nbytes;
 320
 321		if (limit_hit)
 322			len = req->nbytes - (tlen - len);
 323
 324		do {
 325			if (actx->fill + len > out_off)
 326				clen = out_off - actx->fill;
 327			else
 328				clen = len;
 329
 330			memcpy(in_buf + actx->fill, src_buf, clen);
 331			len -= clen;
 332			src_buf += clen;
 333			actx->fill += clen;
 334
 335			/*
 336			 * If we filled the buffer or this is the last SG,
 337			 * submit the buffer.
 338			 */
 339			if (actx->fill == out_off || sg_is_last(src) ||
 340				limit_hit) {
 341				ret = mxs_dcp_run_aes(actx, req, init);
 342				if (ret)
 343					return ret;
 344				init = 0;
 345
 346				out_tmp = out_buf;
 
 
 347				last_out_len = actx->fill;
 348				while (dst && actx->fill) {
 349					if (!split) {
 350						dst_buf = sg_virt(dst);
 351						dst_off = 0;
 352					}
 353					rem = min(sg_dma_len(dst) - dst_off,
 354						  actx->fill);
 355
 356					memcpy(dst_buf + dst_off, out_tmp, rem);
 357					out_tmp += rem;
 358					dst_off += rem;
 359					actx->fill -= rem;
 360
 361					if (dst_off == sg_dma_len(dst)) {
 362						dst = sg_next(dst);
 363						split = 0;
 364					} else {
 365						split = 1;
 366					}
 367				}
 368			}
 369		} while (len);
 370
 371		if (limit_hit)
 372			break;
 373	}
 374
 375	/* Copy the IV for CBC for chaining */
 376	if (!rctx->ecb) {
 377		if (rctx->enc)
 378			memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
 379				AES_BLOCK_SIZE);
 380		else
 381			memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
 382				AES_BLOCK_SIZE);
 383	}
 384
 385	return ret;
 386}
 387
 388static int dcp_chan_thread_aes(void *data)
 389{
 390	struct dcp *sdcp = global_sdcp;
 391	const int chan = DCP_CHAN_CRYPTO;
 392
 393	struct crypto_async_request *backlog;
 394	struct crypto_async_request *arq;
 395
 396	int ret;
 397
 398	while (!kthread_should_stop()) {
 399		set_current_state(TASK_INTERRUPTIBLE);
 400
 401		spin_lock(&sdcp->lock[chan]);
 402		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 403		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 404		spin_unlock(&sdcp->lock[chan]);
 405
 406		if (!backlog && !arq) {
 407			schedule();
 408			continue;
 409		}
 410
 411		set_current_state(TASK_RUNNING);
 412
 413		if (backlog)
 414			backlog->complete(backlog, -EINPROGRESS);
 415
 416		if (arq) {
 417			ret = mxs_dcp_aes_block_crypt(arq);
 418			arq->complete(arq, ret);
 419		}
 420	}
 421
 422	return 0;
 423}
 424
 425static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
 426{
 427	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 428	struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 429	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
 430	int ret;
 431
 432	skcipher_request_set_sync_tfm(subreq, ctx->fallback);
 433	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
 434	skcipher_request_set_crypt(subreq, req->src, req->dst,
 435				   req->nbytes, req->info);
 
 436
 437	if (enc)
 438		ret = crypto_skcipher_encrypt(subreq);
 439	else
 440		ret = crypto_skcipher_decrypt(subreq);
 441
 442	skcipher_request_zero(subreq);
 443
 444	return ret;
 445}
 446
 447static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
 448{
 449	struct dcp *sdcp = global_sdcp;
 450	struct crypto_async_request *arq = &req->base;
 451	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 452	struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
 453	int ret;
 454
 455	if (unlikely(actx->key_len != AES_KEYSIZE_128))
 456		return mxs_dcp_block_fallback(req, enc);
 457
 458	rctx->enc = enc;
 459	rctx->ecb = ecb;
 460	actx->chan = DCP_CHAN_CRYPTO;
 461
 462	spin_lock(&sdcp->lock[actx->chan]);
 463	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 464	spin_unlock(&sdcp->lock[actx->chan]);
 465
 466	wake_up_process(sdcp->thread[actx->chan]);
 467
 468	return ret;
 469}
 470
 471static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req)
 472{
 473	return mxs_dcp_aes_enqueue(req, 0, 1);
 474}
 475
 476static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req)
 477{
 478	return mxs_dcp_aes_enqueue(req, 1, 1);
 479}
 480
 481static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req)
 482{
 483	return mxs_dcp_aes_enqueue(req, 0, 0);
 484}
 485
 486static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req)
 487{
 488	return mxs_dcp_aes_enqueue(req, 1, 0);
 489}
 490
 491static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
 492			      unsigned int len)
 493{
 494	struct dcp_async_ctx *actx = crypto_ablkcipher_ctx(tfm);
 495	unsigned int ret;
 496
 497	/*
 498	 * AES 128 is supposed by the hardware, store key into temporary
 499	 * buffer and exit. We must use the temporary buffer here, since
 500	 * there can still be an operation in progress.
 501	 */
 502	actx->key_len = len;
 503	if (len == AES_KEYSIZE_128) {
 504		memcpy(actx->key, key, len);
 505		return 0;
 506	}
 507
 508	/*
 509	 * If the requested AES key size is not supported by the hardware,
 510	 * but is supported by in-kernel software implementation, we use
 511	 * software fallback.
 512	 */
 513	crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
 514	crypto_sync_skcipher_set_flags(actx->fallback,
 515				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 516
 517	ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
 518	if (!ret)
 519		return 0;
 520
 521	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
 522	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
 523			       CRYPTO_TFM_RES_MASK;
 524
 525	return ret;
 526}
 527
 528static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
 529{
 530	const char *name = crypto_tfm_alg_name(tfm);
 531	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
 532	struct crypto_sync_skcipher *blk;
 533
 534	blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 535	if (IS_ERR(blk))
 536		return PTR_ERR(blk);
 537
 538	actx->fallback = blk;
 539	tfm->crt_ablkcipher.reqsize = sizeof(struct dcp_aes_req_ctx);
 
 540	return 0;
 541}
 542
 543static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
 544{
 545	struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
 546
 547	crypto_free_sync_skcipher(actx->fallback);
 548}
 549
 550/*
 551 * Hashing (SHA1/SHA256)
 552 */
 553static int mxs_dcp_run_sha(struct ahash_request *req)
 554{
 555	struct dcp *sdcp = global_sdcp;
 556	int ret;
 557
 558	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 559	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 560	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 561	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 562
 563	dma_addr_t digest_phys = 0;
 564	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
 565					     DCP_BUF_SZ, DMA_TO_DEVICE);
 566
 
 
 
 
 567	/* Fill in the DMA descriptor. */
 568	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 569		    MXS_DCP_CONTROL0_INTERRUPT |
 570		    MXS_DCP_CONTROL0_ENABLE_HASH;
 571	if (rctx->init)
 572		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
 573
 574	desc->control1 = actx->alg;
 575	desc->next_cmd_addr = 0;
 576	desc->source = buf_phys;
 577	desc->destination = 0;
 578	desc->size = actx->fill;
 579	desc->payload = 0;
 580	desc->status = 0;
 581
 582	/*
 583	 * Align driver with hw behavior when generating null hashes
 584	 */
 585	if (rctx->init && rctx->fini && desc->size == 0) {
 586		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 587		const uint8_t *sha_buf =
 588			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
 589			sha1_null_hash : sha256_null_hash;
 590		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
 591		ret = 0;
 592		goto done_run;
 593	}
 594
 595	/* Set HASH_TERM bit for last transfer block. */
 596	if (rctx->fini) {
 597		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
 598					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
 
 
 
 
 599		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
 600		desc->payload = digest_phys;
 601	}
 602
 603	ret = mxs_dcp_start_dma(actx);
 604
 605	if (rctx->fini)
 606		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
 607				 DMA_FROM_DEVICE);
 608
 609done_run:
 610	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 611
 612	return ret;
 613}
 614
 615static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 616{
 617	struct dcp *sdcp = global_sdcp;
 618
 619	struct ahash_request *req = ahash_request_cast(arq);
 620	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 621	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 622	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 623	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 624	const int nents = sg_nents(req->src);
 625
 626	uint8_t *in_buf = sdcp->coh->sha_in_buf;
 627	uint8_t *out_buf = sdcp->coh->sha_out_buf;
 628
 629	uint8_t *src_buf;
 630
 631	struct scatterlist *src;
 632
 633	unsigned int i, len, clen;
 634	int ret;
 635
 636	int fin = rctx->fini;
 637	if (fin)
 638		rctx->fini = 0;
 639
 640	for_each_sg(req->src, src, nents, i) {
 641		src_buf = sg_virt(src);
 642		len = sg_dma_len(src);
 643
 644		do {
 645			if (actx->fill + len > DCP_BUF_SZ)
 646				clen = DCP_BUF_SZ - actx->fill;
 647			else
 648				clen = len;
 649
 650			memcpy(in_buf + actx->fill, src_buf, clen);
 651			len -= clen;
 652			src_buf += clen;
 653			actx->fill += clen;
 654
 655			/*
 656			 * If we filled the buffer and still have some
 657			 * more data, submit the buffer.
 658			 */
 659			if (len && actx->fill == DCP_BUF_SZ) {
 660				ret = mxs_dcp_run_sha(req);
 661				if (ret)
 662					return ret;
 663				actx->fill = 0;
 664				rctx->init = 0;
 665			}
 666		} while (len);
 
 
 
 667	}
 668
 669	if (fin) {
 670		rctx->fini = 1;
 671
 672		/* Submit whatever is left. */
 673		if (!req->result)
 674			return -EINVAL;
 675
 676		ret = mxs_dcp_run_sha(req);
 677		if (ret)
 678			return ret;
 679
 680		actx->fill = 0;
 681
 682		/* For some reason the result is flipped */
 683		for (i = 0; i < halg->digestsize; i++)
 684			req->result[i] = out_buf[halg->digestsize - i - 1];
 685	}
 686
 687	return 0;
 688}
 689
 690static int dcp_chan_thread_sha(void *data)
 691{
 692	struct dcp *sdcp = global_sdcp;
 693	const int chan = DCP_CHAN_HASH_SHA;
 694
 695	struct crypto_async_request *backlog;
 696	struct crypto_async_request *arq;
 697	int ret;
 698
 699	while (!kthread_should_stop()) {
 700		set_current_state(TASK_INTERRUPTIBLE);
 701
 702		spin_lock(&sdcp->lock[chan]);
 703		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 704		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 705		spin_unlock(&sdcp->lock[chan]);
 706
 707		if (!backlog && !arq) {
 708			schedule();
 709			continue;
 710		}
 711
 712		set_current_state(TASK_RUNNING);
 713
 714		if (backlog)
 715			backlog->complete(backlog, -EINPROGRESS);
 716
 717		if (arq) {
 718			ret = dcp_sha_req_to_buf(arq);
 719			arq->complete(arq, ret);
 720		}
 721	}
 722
 723	return 0;
 724}
 725
 726static int dcp_sha_init(struct ahash_request *req)
 727{
 728	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 729	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 730
 731	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 732
 733	/*
 734	 * Start hashing session. The code below only inits the
 735	 * hashing session context, nothing more.
 736	 */
 737	memset(actx, 0, sizeof(*actx));
 738
 739	if (strcmp(halg->base.cra_name, "sha1") == 0)
 740		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
 741	else
 742		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
 743
 744	actx->fill = 0;
 745	actx->hot = 0;
 746	actx->chan = DCP_CHAN_HASH_SHA;
 747
 748	mutex_init(&actx->mutex);
 749
 750	return 0;
 751}
 752
 753static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 754{
 755	struct dcp *sdcp = global_sdcp;
 756
 757	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 758	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 759	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 760
 761	int ret;
 762
 763	/*
 764	 * Ignore requests that have no data in them and are not
 765	 * the trailing requests in the stream of requests.
 766	 */
 767	if (!req->nbytes && !fini)
 768		return 0;
 769
 770	mutex_lock(&actx->mutex);
 771
 772	rctx->fini = fini;
 773
 774	if (!actx->hot) {
 775		actx->hot = 1;
 776		rctx->init = 1;
 777	}
 778
 779	spin_lock(&sdcp->lock[actx->chan]);
 780	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 781	spin_unlock(&sdcp->lock[actx->chan]);
 782
 783	wake_up_process(sdcp->thread[actx->chan]);
 784	mutex_unlock(&actx->mutex);
 785
 786	return ret;
 787}
 788
 789static int dcp_sha_update(struct ahash_request *req)
 790{
 791	return dcp_sha_update_fx(req, 0);
 792}
 793
 794static int dcp_sha_final(struct ahash_request *req)
 795{
 796	ahash_request_set_crypt(req, NULL, req->result, 0);
 797	req->nbytes = 0;
 798	return dcp_sha_update_fx(req, 1);
 799}
 800
 801static int dcp_sha_finup(struct ahash_request *req)
 802{
 803	return dcp_sha_update_fx(req, 1);
 804}
 805
 806static int dcp_sha_digest(struct ahash_request *req)
 807{
 808	int ret;
 809
 810	ret = dcp_sha_init(req);
 811	if (ret)
 812		return ret;
 813
 814	return dcp_sha_finup(req);
 815}
 816
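/*
 * Export/import snapshot the request context and the per-tfm context into
 * struct dcp_export_state (see halg.statesize below), so a partially hashed
 * state can be carried over to another request.
 */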
 817static int dcp_sha_import(struct ahash_request *req, const void *in)
 818{
 819	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 820	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 821	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 822	const struct dcp_export_state *export = in;
 823
 824	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
 825	memset(actx, 0, sizeof(struct dcp_async_ctx));
 826	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
 827	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
 828
 829	return 0;
 830}
 831
 832static int dcp_sha_export(struct ahash_request *req, void *out)
 833{
 834	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
 835	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 836	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
 837	struct dcp_export_state *export = out;
 838
 839	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
 840	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
 841
 842	return 0;
 843}
 844
 845static int dcp_sha_cra_init(struct crypto_tfm *tfm)
 846{
 847	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 848				 sizeof(struct dcp_sha_req_ctx));
 849	return 0;
 850}
 851
 852static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
 853{
 854}
 855
 856/* AES 128 ECB and AES 128 CBC */
 857	static struct skcipher_alg dcp_aes_algs[] = {
 858		{
 859			.base.cra_name		= "ecb(aes)",
 860			.base.cra_driver_name	= "ecb-aes-dcp",
 861			.base.cra_priority	= 400,
 862			.base.cra_alignmask	= 15,
 863			.base.cra_flags		= CRYPTO_ALG_ASYNC |
 864						  CRYPTO_ALG_NEED_FALLBACK,
 865			.base.cra_blocksize	= AES_BLOCK_SIZE,
 866			.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
 867			.base.cra_module	= THIS_MODULE,
 868	
 869			.min_keysize		= AES_MIN_KEY_SIZE,
 870			.max_keysize		= AES_MAX_KEY_SIZE,
 871			.setkey			= mxs_dcp_aes_setkey,
 872			.encrypt		= mxs_dcp_aes_ecb_encrypt,
 873			.decrypt		= mxs_dcp_aes_ecb_decrypt,
 874			.init			= mxs_dcp_aes_fallback_init_tfm,
 875			.exit			= mxs_dcp_aes_fallback_exit_tfm,
 876		}, {
 877			.base.cra_name		= "cbc(aes)",
 878			.base.cra_driver_name	= "cbc-aes-dcp",
 879			.base.cra_priority	= 400,
 880			.base.cra_alignmask	= 15,
 881			.base.cra_flags		= CRYPTO_ALG_ASYNC |
 882						  CRYPTO_ALG_NEED_FALLBACK,
 883			.base.cra_blocksize	= AES_BLOCK_SIZE,
 884			.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
 885			.base.cra_module	= THIS_MODULE,
 886	
 887			.min_keysize		= AES_MIN_KEY_SIZE,
 888			.max_keysize		= AES_MAX_KEY_SIZE,
 889			.setkey			= mxs_dcp_aes_setkey,
 890			.encrypt		= mxs_dcp_aes_cbc_encrypt,
 891			.decrypt		= mxs_dcp_aes_cbc_decrypt,
 892			.ivsize			= AES_BLOCK_SIZE,
 893			.init			= mxs_dcp_aes_fallback_init_tfm,
 894			.exit			= mxs_dcp_aes_fallback_exit_tfm,
 895		},
 896	};
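
/*
 * Illustrative only: a minimal sketch of how a kernel caller could drive the
 * "cbc(aes)" skcipher registered above through the generic crypto API. The
 * function name dcp_aes_example() and its buffers are hypothetical and not
 * part of this driver; the block is kept under #if 0 so it is never built.
 */
#if 0
static int dcp_aes_example(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 key[AES_KEYSIZE_128] = { 0 };
	u8 iv[AES_BLOCK_SIZE] = { 0 };
	u8 *buf;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req) {
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out;

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* Encrypt one block in place and wait for the async completion. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return ret;
}
#endif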
 907
 908/* SHA1 */
 909static struct ahash_alg dcp_sha1_alg = {
 910	.init	= dcp_sha_init,
 911	.update	= dcp_sha_update,
 912	.final	= dcp_sha_final,
 913	.finup	= dcp_sha_finup,
 914	.digest	= dcp_sha_digest,
 915	.import = dcp_sha_import,
 916	.export = dcp_sha_export,
 917	.halg	= {
 918		.digestsize	= SHA1_DIGEST_SIZE,
 919		.statesize	= sizeof(struct dcp_export_state),
 920		.base		= {
 921			.cra_name		= "sha1",
 922			.cra_driver_name	= "sha1-dcp",
 923			.cra_priority		= 400,
 924			.cra_alignmask		= 63,
 925			.cra_flags		= CRYPTO_ALG_ASYNC,
 926			.cra_blocksize		= SHA1_BLOCK_SIZE,
 927			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 928			.cra_module		= THIS_MODULE,
 929			.cra_init		= dcp_sha_cra_init,
 930			.cra_exit		= dcp_sha_cra_exit,
 931		},
 932	},
 933};
 934
 935/* SHA256 */
 936static struct ahash_alg dcp_sha256_alg = {
 937	.init	= dcp_sha_init,
 938	.update	= dcp_sha_update,
 939	.final	= dcp_sha_final,
 940	.finup	= dcp_sha_finup,
 941	.digest	= dcp_sha_digest,
 942	.import = dcp_sha_import,
 943	.export = dcp_sha_export,
 944	.halg	= {
 945		.digestsize	= SHA256_DIGEST_SIZE,
 946		.statesize	= sizeof(struct dcp_export_state),
 947		.base		= {
 948			.cra_name		= "sha256",
 949			.cra_driver_name	= "sha256-dcp",
 950			.cra_priority		= 400,
 951			.cra_alignmask		= 63,
 952			.cra_flags		= CRYPTO_ALG_ASYNC,
 953			.cra_blocksize		= SHA256_BLOCK_SIZE,
 954			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 955			.cra_module		= THIS_MODULE,
 956			.cra_init		= dcp_sha_cra_init,
 957			.cra_exit		= dcp_sha_cra_exit,
 958		},
 959	},
 960};
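
/*
 * Illustrative only: a minimal sketch of hashing a short buffer with the
 * "sha256" ahash registered above, using the generic ahash API. The function
 * name dcp_sha_example() is hypothetical and the block is kept under #if 0
 * so it is never built.
 */
#if 0
static int dcp_sha_example(void)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	static const u8 msg[] = "abc";
	u8 digest[SHA256_DIGEST_SIZE];
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, msg, sizeof(msg) - 1);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, sizeof(msg) - 1);

	/* One-shot digest; wait synchronously for the async completion. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}
#endif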
 961
 962static irqreturn_t mxs_dcp_irq(int irq, void *context)
 963{
 964	struct dcp *sdcp = context;
 965	uint32_t stat;
 966	int i;
 967
 968	stat = readl(sdcp->base + MXS_DCP_STAT);
 969	stat &= MXS_DCP_STAT_IRQ_MASK;
 970	if (!stat)
 971		return IRQ_NONE;
 972
 973	/* Clear the interrupts. */
 974	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
 975
 976	/* Complete the DMA requests that finished. */
 977	for (i = 0; i < DCP_MAX_CHANS; i++)
 978		if (stat & (1 << i))
 979			complete(&sdcp->completion[i]);
 980
 981	return IRQ_HANDLED;
 982}
 983
 984static int mxs_dcp_probe(struct platform_device *pdev)
 985{
 986	struct device *dev = &pdev->dev;
 987	struct dcp *sdcp = NULL;
 988	int i, ret;
 989	int dcp_vmi_irq, dcp_irq;
 990
 991	if (global_sdcp) {
 992		dev_err(dev, "Only one DCP instance allowed!\n");
 993		return -ENODEV;
 994	}
 995
 996	dcp_vmi_irq = platform_get_irq(pdev, 0);
 997	if (dcp_vmi_irq < 0)
 998		return dcp_vmi_irq;
 999
1000	dcp_irq = platform_get_irq(pdev, 1);
1001	if (dcp_irq < 0)
1002		return dcp_irq;
1003
1004	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
1005	if (!sdcp)
1006		return -ENOMEM;
1007
1008	sdcp->dev = dev;
1009	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
1010	if (IS_ERR(sdcp->base))
1011		return PTR_ERR(sdcp->base);
1012
1013
1014	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
1015			       "dcp-vmi-irq", sdcp);
1016	if (ret) {
1017		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
1018		return ret;
1019	}
1020
1021	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
1022			       "dcp-irq", sdcp);
1023	if (ret) {
1024		dev_err(dev, "Failed to claim DCP IRQ!\n");
1025		return ret;
1026	}
1027
1028	/* Allocate coherent helper block. */
1029	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
1030				   GFP_KERNEL);
1031	if (!sdcp->coh)
1032		return -ENOMEM;
1033
1034	/* Re-align the structure so it fits the DCP constraints. */
1035	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
1036
1037	/* DCP clock is optional, only used on some SOCs */
1038	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
1039	if (IS_ERR(sdcp->dcp_clk)) {
1040		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
1041			return PTR_ERR(sdcp->dcp_clk);
1042		sdcp->dcp_clk = NULL;
1043	}
1044	ret = clk_prepare_enable(sdcp->dcp_clk);
1045	if (ret)
1046		return ret;
1047
1048	/* Restart the DCP block. */
1049	ret = stmp_reset_block(sdcp->base);
1050	if (ret) {
1051		dev_err(dev, "Failed reset\n");
1052		goto err_disable_unprepare_clk;
1053	}
1054
1055	/* Initialize control register. */
1056	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
1057	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
1058	       sdcp->base + MXS_DCP_CTRL);
1059
1060	/* Enable all DCP DMA channels. */
1061	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
1062	       sdcp->base + MXS_DCP_CHANNELCTRL);
1063
1064	/*
1065	 * We do not enable context switching. Give the context buffer a
1066	 * pointer to an illegal address so if context switching is
1067	 * inadvertently enabled, the DCP will return an error instead of
1068	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
1069	 * address will do.
1070	 */
1071	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
1072	for (i = 0; i < DCP_MAX_CHANS; i++)
1073		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
1074	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
1075
1076	global_sdcp = sdcp;
1077
1078	platform_set_drvdata(pdev, sdcp);
1079
1080	for (i = 0; i < DCP_MAX_CHANS; i++) {
1081		spin_lock_init(&sdcp->lock[i]);
1082		init_completion(&sdcp->completion[i]);
1083		crypto_init_queue(&sdcp->queue[i], 50);
1084	}
1085
1086	/* Create the SHA and AES handler threads. */
1087	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
1088						      NULL, "mxs_dcp_chan/sha");
1089	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
1090		dev_err(dev, "Error starting SHA thread!\n");
1091		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
1092		goto err_disable_unprepare_clk;
1093	}
1094
1095	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
1096						    NULL, "mxs_dcp_chan/aes");
1097	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
1098		dev_err(dev, "Error starting crypto thread!\n");
1099		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
1100		goto err_destroy_sha_thread;
1101	}
1102
1103	/* Register the various crypto algorithms. */
1104	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
1105
1106	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
1107		ret = crypto_register_skciphers(dcp_aes_algs,
1108						ARRAY_SIZE(dcp_aes_algs));
1109		if (ret) {
1110			/* Failed to register algorithm. */
1111			dev_err(dev, "Failed to register AES crypto!\n");
1112			goto err_destroy_aes_thread;
1113		}
1114	}
1115
1116	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
1117		ret = crypto_register_ahash(&dcp_sha1_alg);
1118		if (ret) {
1119			dev_err(dev, "Failed to register %s hash!\n",
1120				dcp_sha1_alg.halg.base.cra_name);
1121			goto err_unregister_aes;
1122		}
1123	}
1124
1125	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
1126		ret = crypto_register_ahash(&dcp_sha256_alg);
1127		if (ret) {
1128			dev_err(dev, "Failed to register %s hash!\n",
1129				dcp_sha256_alg.halg.base.cra_name);
1130			goto err_unregister_sha1;
1131		}
1132	}
1133
1134	return 0;
1135
1136err_unregister_sha1:
1137	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1138		crypto_unregister_ahash(&dcp_sha1_alg);
1139
1140err_unregister_aes:
1141	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1142		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1143
1144err_destroy_aes_thread:
1145	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1146
1147err_destroy_sha_thread:
1148	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1149
1150err_disable_unprepare_clk:
1151	clk_disable_unprepare(sdcp->dcp_clk);
1152
1153	return ret;
1154}
1155
1156static int mxs_dcp_remove(struct platform_device *pdev)
1157{
1158	struct dcp *sdcp = platform_get_drvdata(pdev);
1159
1160	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
1161		crypto_unregister_ahash(&dcp_sha256_alg);
1162
1163	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1164		crypto_unregister_ahash(&dcp_sha1_alg);
1165
1166	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1167		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1168
1169	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1170	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1171
1172	clk_disable_unprepare(sdcp->dcp_clk);
1173
1174	platform_set_drvdata(pdev, NULL);
1175
1176	global_sdcp = NULL;
1177
1178	return 0;
1179}
1180
1181static const struct of_device_id mxs_dcp_dt_ids[] = {
1182	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
1183	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
1184	{ /* sentinel */ }
1185};
1186
1187MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
1188
1189static struct platform_driver mxs_dcp_driver = {
1190	.probe	= mxs_dcp_probe,
1191	.remove	= mxs_dcp_remove,
1192	.driver	= {
1193		.name		= "mxs-dcp",
1194		.of_match_table	= mxs_dcp_dt_ids,
1195	},
1196};
1197
1198module_platform_driver(mxs_dcp_driver);
1199
1200MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
1201MODULE_DESCRIPTION("Freescale MXS DCP Driver");
1202MODULE_LICENSE("GPL");
1203MODULE_ALIAS("platform:mxs-dcp");