Linux v6.8: drivers/crypto/mxs-dcp.c
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Freescale i.MX23/i.MX28 Data Co-Processor driver
   4 *
   5 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
   6 */
   7
   8#include <linux/dma-mapping.h>
   9#include <linux/interrupt.h>
  10#include <linux/io.h>
  11#include <linux/kernel.h>
  12#include <linux/kthread.h>
  13#include <linux/module.h>
  14#include <linux/of.h>
  15#include <linux/platform_device.h>
  16#include <linux/stmp_device.h>
  17#include <linux/clk.h>
  18
  19#include <crypto/aes.h>
  20#include <crypto/sha1.h>
  21#include <crypto/sha2.h>
  22#include <crypto/internal/hash.h>
  23#include <crypto/internal/skcipher.h>
  24#include <crypto/scatterwalk.h>
  25
  26#define DCP_MAX_CHANS	4
  27#define DCP_BUF_SZ	PAGE_SIZE
  28#define DCP_SHA_PAY_SZ  64
  29
  30#define DCP_ALIGNMENT	64
  31
  32/*
  33 * Null hashes to align with hw behavior on i.MX6SL and i.MX6ULL;
  34 * these are flipped for consistency with the hw output
  35 */
  36static const uint8_t sha1_null_hash[] =
  37	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
  38	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
  39
  40static const uint8_t sha256_null_hash[] =
  41	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
  42	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
  43	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
  44	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
  45
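/*
 * Illustrative note, not part of the original source: the two tables above
 * are simply the standard empty-message digests stored byte-reversed, in the
 * order the DCP writes its result.  Reversing sha1_null_hash gives
 * da39a3ee 5e6b4b0d 3255bfef 95601890 afd80709 (SHA-1 of ""), and reversing
 * sha256_null_hash gives e3b0c442 98fc1c14 9afbf4c8 996fb924 27ae41e4
 * 649b934c a495991b 7852b855 (SHA-256 of "").  The same byte reversal is
 * applied to every hardware digest in dcp_sha_req_to_buf() below, which is
 * why these tables are stored pre-flipped.
 */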
  46/* DCP DMA descriptor. */
  47struct dcp_dma_desc {
  48	uint32_t	next_cmd_addr;
  49	uint32_t	control0;
  50	uint32_t	control1;
  51	uint32_t	source;
  52	uint32_t	destination;
  53	uint32_t	size;
  54	uint32_t	payload;
  55	uint32_t	status;
  56};
  57
  58/* Coherent aligned block for bounce buffering. */
  59struct dcp_coherent_block {
  60	uint8_t			aes_in_buf[DCP_BUF_SZ];
  61	uint8_t			aes_out_buf[DCP_BUF_SZ];
  62	uint8_t			sha_in_buf[DCP_BUF_SZ];
  63	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];
  64
  65	uint8_t			aes_key[2 * AES_KEYSIZE_128];
  66
  67	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
  68};
  69
  70struct dcp {
  71	struct device			*dev;
  72	void __iomem			*base;
  73
  74	uint32_t			caps;
  75
  76	struct dcp_coherent_block	*coh;
  77
  78	struct completion		completion[DCP_MAX_CHANS];
  79	spinlock_t			lock[DCP_MAX_CHANS];
  80	struct task_struct		*thread[DCP_MAX_CHANS];
  81	struct crypto_queue		queue[DCP_MAX_CHANS];
  82	struct clk			*dcp_clk;
  83};
  84
  85enum dcp_chan {
  86	DCP_CHAN_HASH_SHA	= 0,
  87	DCP_CHAN_CRYPTO		= 2,
  88};
  89
  90struct dcp_async_ctx {
  91	/* Common context */
  92	enum dcp_chan	chan;
  93	uint32_t	fill;
  94
  95	/* SHA Hash-specific context */
  96	struct mutex			mutex;
  97	uint32_t			alg;
  98	unsigned int			hot:1;
  99
 100	/* Crypto-specific context */
 101	struct crypto_skcipher		*fallback;
 102	unsigned int			key_len;
 103	uint8_t				key[AES_KEYSIZE_128];
 104};
 105
 106struct dcp_aes_req_ctx {
 107	unsigned int	enc:1;
 108	unsigned int	ecb:1;
 109	struct skcipher_request fallback_req;	// keep at the end
 110};
 111
 112struct dcp_sha_req_ctx {
 113	unsigned int	init:1;
 114	unsigned int	fini:1;
 115};
 116
 117struct dcp_export_state {
 118	struct dcp_sha_req_ctx req_ctx;
 119	struct dcp_async_ctx async_ctx;
 120};
 121
 122/*
 123 * There can be only one instance of the MXS DCP due to the
 124 * design of the Linux Crypto API.
 125 */
 126static struct dcp *global_sdcp;
 127
 128/* DCP register layout. */
 129#define MXS_DCP_CTRL				0x00
 130#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
 131#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)
 132
 133#define MXS_DCP_STAT				0x10
 134#define MXS_DCP_STAT_CLR			0x18
 135#define MXS_DCP_STAT_IRQ_MASK			0xf
 136
 137#define MXS_DCP_CHANNELCTRL			0x20
 138#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff
 139
 140#define MXS_DCP_CAPABILITY1			0x40
 141#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
 142#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
 143#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)
 144
 145#define MXS_DCP_CONTEXT				0x50
 146
 147#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
 148
 149#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
 150
 151#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
 152#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))
 153
 154/* DMA descriptor bits. */
 155#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
 156#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
 157#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
 158#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
 159#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
 160#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
 161#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
 162#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
 163#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)
 164
 165#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
 166#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
 167#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
 168#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
 169#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
 170
 171static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 172{
 173	int dma_err;
 174	struct dcp *sdcp = global_sdcp;
 175	const int chan = actx->chan;
 176	uint32_t stat;
 177	unsigned long ret;
 178	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 179	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
 180					      DMA_TO_DEVICE);
 181
 182	dma_err = dma_mapping_error(sdcp->dev, desc_phys);
 183	if (dma_err)
 184		return dma_err;
 185
 186	reinit_completion(&sdcp->completion[chan]);
 187
 188	/* Clear status register. */
 189	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
 190
 191	/* Load the DMA descriptor. */
 192	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
 193
 194	/* Increment the semaphore to start the DMA transfer. */
 195	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
 196
 197	ret = wait_for_completion_timeout(&sdcp->completion[chan],
 198					  msecs_to_jiffies(1000));
 199	if (!ret) {
 200		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
 201			chan, readl(sdcp->base + MXS_DCP_STAT));
 202		return -ETIMEDOUT;
 203	}
 204
 205	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
 206	if (stat & 0xff) {
 207		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
 208			chan, stat);
 209		return -EINVAL;
 210	}
 211
 212	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
 213
 214	return 0;
 215}
 216
 217/*
 218 * Encryption (AES128)
 219 */
 220static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
 221			   struct skcipher_request *req, int init)
 222{
 223	dma_addr_t key_phys, src_phys, dst_phys;
 224	struct dcp *sdcp = global_sdcp;
 225	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 226	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 227	int ret;
 228
 229	key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
 230				  2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
 231	ret = dma_mapping_error(sdcp->dev, key_phys);
 232	if (ret)
 233		return ret;
 234
 235	src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
 236				  DCP_BUF_SZ, DMA_TO_DEVICE);
 237	ret = dma_mapping_error(sdcp->dev, src_phys);
 238	if (ret)
 239		goto err_src;
 240
 241	dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
 242				  DCP_BUF_SZ, DMA_FROM_DEVICE);
 243	ret = dma_mapping_error(sdcp->dev, dst_phys);
 244	if (ret)
 245		goto err_dst;
 246
 247	if (actx->fill % AES_BLOCK_SIZE) {
 248		dev_err(sdcp->dev, "Invalid block size!\n");
 249		ret = -EINVAL;
 250		goto aes_done_run;
 251	}
 252
 253	/* Fill in the DMA descriptor. */
 254	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 255		    MXS_DCP_CONTROL0_INTERRUPT |
 256		    MXS_DCP_CONTROL0_ENABLE_CIPHER;
 257
 258	/* Payload contains the key. */
 259	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
 260
 261	if (rctx->enc)
 262		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
 263	if (init)
 264		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
 265
 266	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
 267
 268	if (rctx->ecb)
 269		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
 270	else
 271		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 272
 273	desc->next_cmd_addr = 0;
 274	desc->source = src_phys;
 275	desc->destination = dst_phys;
 276	desc->size = actx->fill;
 277	desc->payload = key_phys;
 278	desc->status = 0;
 279
 280	ret = mxs_dcp_start_dma(actx);
 281
 282aes_done_run:
 283	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
 284err_dst:
 285	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 286err_src:
 287	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
 288			 DMA_TO_DEVICE);
 289
 290	return ret;
 291}
 292
 293static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 294{
 295	struct dcp *sdcp = global_sdcp;
 296
 297	struct skcipher_request *req = skcipher_request_cast(arq);
 298	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 299	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 300
 301	struct scatterlist *dst = req->dst;
 302	struct scatterlist *src = req->src;
 303	int dst_nents = sg_nents(dst);
 304
 305	const int out_off = DCP_BUF_SZ;
 306	uint8_t *in_buf = sdcp->coh->aes_in_buf;
 307	uint8_t *out_buf = sdcp->coh->aes_out_buf;
 308
 309	uint32_t dst_off = 0;
 310	uint8_t *src_buf = NULL;
 311	uint32_t last_out_len = 0;
 312
 313	uint8_t *key = sdcp->coh->aes_key;
 314
 315	int ret = 0;
 316	unsigned int i, len, clen, tlen = 0;
 317	int init = 0;
 318	bool limit_hit = false;
 319
 320	actx->fill = 0;
 321
 322	/* Copy the key from the temporary location. */
 323	memcpy(key, actx->key, actx->key_len);
 324
 325	if (!rctx->ecb) {
 326		/* Copy the CBC IV just past the key. */
 327		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
 328		/* CBC needs the INIT set. */
 329		init = 1;
 330	} else {
 331		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
 332	}
 333
 334	for_each_sg(req->src, src, sg_nents(req->src), i) {
 335		src_buf = sg_virt(src);
 336		len = sg_dma_len(src);
 337		tlen += len;
 338		limit_hit = tlen > req->cryptlen;
 339
 340		if (limit_hit)
 341			len = req->cryptlen - (tlen - len);
 342
 343		do {
 344			if (actx->fill + len > out_off)
 345				clen = out_off - actx->fill;
 346			else
 347				clen = len;
 348
 349			memcpy(in_buf + actx->fill, src_buf, clen);
 350			len -= clen;
 351			src_buf += clen;
 352			actx->fill += clen;
 353
 354			/*
 355			 * If we filled the buffer or this is the last SG,
 356			 * submit the buffer.
 357			 */
 358			if (actx->fill == out_off || sg_is_last(src) ||
 359			    limit_hit) {
 360				ret = mxs_dcp_run_aes(actx, req, init);
 361				if (ret)
 362					return ret;
 363				init = 0;
 364
 365				sg_pcopy_from_buffer(dst, dst_nents, out_buf,
 366						     actx->fill, dst_off);
 367				dst_off += actx->fill;
 368				last_out_len = actx->fill;
 369				actx->fill = 0;
 370			}
 371		} while (len);
 372
 373		if (limit_hit)
 374			break;
 375	}
 376
 377	/* Copy back the CBC IV for chaining */
 378	if (!rctx->ecb) {
 379		if (rctx->enc)
 380			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
 381				AES_BLOCK_SIZE);
 382		else
 383			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
 384				AES_BLOCK_SIZE);
 385	}
 386
 387	return ret;
 388}
 389
 390static int dcp_chan_thread_aes(void *data)
 391{
 392	struct dcp *sdcp = global_sdcp;
 393	const int chan = DCP_CHAN_CRYPTO;
 394
 395	struct crypto_async_request *backlog;
 396	struct crypto_async_request *arq;
 397
 398	int ret;
 399
 400	while (!kthread_should_stop()) {
 401		set_current_state(TASK_INTERRUPTIBLE);
 402
 403		spin_lock(&sdcp->lock[chan]);
 404		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 405		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 406		spin_unlock(&sdcp->lock[chan]);
 407
 408		if (!backlog && !arq) {
 409			schedule();
 410			continue;
 411		}
 412
 413		set_current_state(TASK_RUNNING);
 414
 415		if (backlog)
 416			crypto_request_complete(backlog, -EINPROGRESS);
 417
 418		if (arq) {
 419			ret = mxs_dcp_aes_block_crypt(arq);
 420			crypto_request_complete(arq, ret);
 421		}
 422	}
 423
 424	return 0;
 425}
 426
 427static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
 428{
 429	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 430	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 431	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
 432	int ret;
 433
 434	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 435	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
 436				      req->base.complete, req->base.data);
 437	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
 438				   req->cryptlen, req->iv);
 439
 440	if (enc)
 441		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
 442	else
 443		ret = crypto_skcipher_decrypt(&rctx->fallback_req);
 444
 445	return ret;
 446}
 447
 448static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
 449{
 450	struct dcp *sdcp = global_sdcp;
 451	struct crypto_async_request *arq = &req->base;
 452	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 453	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 454	int ret;
 455
 456	if (unlikely(actx->key_len != AES_KEYSIZE_128))
 457		return mxs_dcp_block_fallback(req, enc);
 458
 459	rctx->enc = enc;
 460	rctx->ecb = ecb;
 461	actx->chan = DCP_CHAN_CRYPTO;
 462
 463	spin_lock(&sdcp->lock[actx->chan]);
 464	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 465	spin_unlock(&sdcp->lock[actx->chan]);
 466
 467	wake_up_process(sdcp->thread[actx->chan]);
 468
 469	return ret;
 470}
 471
 472static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
 473{
 474	return mxs_dcp_aes_enqueue(req, 0, 1);
 475}
 476
 477static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
 478{
 479	return mxs_dcp_aes_enqueue(req, 1, 1);
 480}
 481
 482static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
 483{
 484	return mxs_dcp_aes_enqueue(req, 0, 0);
 485}
 486
 487static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
 488{
 489	return mxs_dcp_aes_enqueue(req, 1, 0);
 490}
 491
 492static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 493			      unsigned int len)
 494{
 495	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 496
 497	/*
 498	 * AES-128 is supported by the hardware, so store the key into the
 499	 * temporary buffer and exit. We must use the temporary buffer here,
 500	 * since there can still be an operation in progress.
 501	 */
 502	actx->key_len = len;
 503	if (len == AES_KEYSIZE_128) {
 504		memcpy(actx->key, key, len);
 505		return 0;
 506	}
 507
 508	/*
 509	 * If the requested AES key size is not supported by the hardware,
 510	 * but is supported by in-kernel software implementation, we use
 511	 * software fallback.
 512	 */
 513	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
 514	crypto_skcipher_set_flags(actx->fallback,
 515				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 516	return crypto_skcipher_setkey(actx->fallback, key, len);
 517}
 518
 519static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
 520{
 521	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
 522	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 523	struct crypto_skcipher *blk;
 524
 525	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 526	if (IS_ERR(blk))
 527		return PTR_ERR(blk);
 528
 529	actx->fallback = blk;
 530	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
 531					 crypto_skcipher_reqsize(blk));
 532	return 0;
 533}
 534
 535static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
 536{
 537	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 538
 539	crypto_free_skcipher(actx->fallback);
 540}
 541
 542/*
 543 * Hashing (SHA1/SHA256)
 544 */
 545static int mxs_dcp_run_sha(struct ahash_request *req)
 546{
 547	struct dcp *sdcp = global_sdcp;
 548	int ret;
 549
 550	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 551	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 552	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 553	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 554
 555	dma_addr_t digest_phys = 0;
 556	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
 557					     DCP_BUF_SZ, DMA_TO_DEVICE);
 558
 559	ret = dma_mapping_error(sdcp->dev, buf_phys);
 560	if (ret)
 561		return ret;
 562
 563	/* Fill in the DMA descriptor. */
 564	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 565		    MXS_DCP_CONTROL0_INTERRUPT |
 566		    MXS_DCP_CONTROL0_ENABLE_HASH;
 567	if (rctx->init)
 568		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
 569
 570	desc->control1 = actx->alg;
 571	desc->next_cmd_addr = 0;
 572	desc->source = buf_phys;
 573	desc->destination = 0;
 574	desc->size = actx->fill;
 575	desc->payload = 0;
 576	desc->status = 0;
 577
 578	/*
 579	 * Align driver with hw behavior when generating null hashes
 580	 */
 581	if (rctx->init && rctx->fini && desc->size == 0) {
 582		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 583		const uint8_t *sha_buf =
 584			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
 585			sha1_null_hash : sha256_null_hash;
 586		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
 587		ret = 0;
 588		goto done_run;
 589	}
 590
 591	/* Set HASH_TERM bit for last transfer block. */
 592	if (rctx->fini) {
 593		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
 594					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
 595		ret = dma_mapping_error(sdcp->dev, digest_phys);
 596		if (ret)
 597			goto done_run;
 598
 599		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
 600		desc->payload = digest_phys;
 601	}
 602
 603	ret = mxs_dcp_start_dma(actx);
 604
 605	if (rctx->fini)
 606		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
 607				 DMA_FROM_DEVICE);
 608
 609done_run:
 610	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 611
 612	return ret;
 613}
 614
 615static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 616{
 617	struct dcp *sdcp = global_sdcp;
 618
 619	struct ahash_request *req = ahash_request_cast(arq);
 620	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 621	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 622	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 623	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 624
 625	uint8_t *in_buf = sdcp->coh->sha_in_buf;
 626	uint8_t *out_buf = sdcp->coh->sha_out_buf;
 627
 628	struct scatterlist *src;
 629
 630	unsigned int i, len, clen, oft = 0;
 631	int ret;
 632
 633	int fin = rctx->fini;
 634	if (fin)
 635		rctx->fini = 0;
 636
 637	src = req->src;
 638	len = req->nbytes;
 639
 640	while (len) {
 641		if (actx->fill + len > DCP_BUF_SZ)
 642			clen = DCP_BUF_SZ - actx->fill;
 643		else
 644			clen = len;
 645
 646		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
 647					 0);
 648
 649		len -= clen;
 650		oft += clen;
 651		actx->fill += clen;
 652
 653		/*
 654		 * If we filled the buffer and still have some
 655		 * more data, submit the buffer.
 656		 */
 657		if (len && actx->fill == DCP_BUF_SZ) {
 658			ret = mxs_dcp_run_sha(req);
 659			if (ret)
 660				return ret;
 661			actx->fill = 0;
 662			rctx->init = 0;
 663		}
 664	}
 665
 666	if (fin) {
 667		rctx->fini = 1;
 668
 669		/* Submit whatever is left. */
 670		if (!req->result)
 671			return -EINVAL;
 672
 673		ret = mxs_dcp_run_sha(req);
 674		if (ret)
 675			return ret;
 676
 677		actx->fill = 0;
 678
 679		/* For some reason the result is flipped */
 680		for (i = 0; i < halg->digestsize; i++)
 681			req->result[i] = out_buf[halg->digestsize - i - 1];
 682	}
 683
 684	return 0;
 685}
 686
 687static int dcp_chan_thread_sha(void *data)
 688{
 689	struct dcp *sdcp = global_sdcp;
 690	const int chan = DCP_CHAN_HASH_SHA;
 691
 692	struct crypto_async_request *backlog;
 693	struct crypto_async_request *arq;
 694	int ret;
 695
 696	while (!kthread_should_stop()) {
 697		set_current_state(TASK_INTERRUPTIBLE);
 698
 699		spin_lock(&sdcp->lock[chan]);
 700		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 701		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 702		spin_unlock(&sdcp->lock[chan]);
 703
 704		if (!backlog && !arq) {
 705			schedule();
 706			continue;
 707		}
 708
 709		set_current_state(TASK_RUNNING);
 710
 711		if (backlog)
 712			crypto_request_complete(backlog, -EINPROGRESS);
 713
 714		if (arq) {
 715			ret = dcp_sha_req_to_buf(arq);
 716			crypto_request_complete(arq, ret);
 717		}
 718	}
 719
 720	return 0;
 721}
 722
 723static int dcp_sha_init(struct ahash_request *req)
 724{
 725	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 726	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 727
 728	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 729
 730	/*
 731	 * Start hashing session. The code below only inits the
 732	 * hashing session context, nothing more.
 733	 */
 734	memset(actx, 0, sizeof(*actx));
 735
 736	if (strcmp(halg->base.cra_name, "sha1") == 0)
 737		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
 738	else
 739		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
 740
 741	actx->fill = 0;
 742	actx->hot = 0;
 743	actx->chan = DCP_CHAN_HASH_SHA;
 744
 745	mutex_init(&actx->mutex);
 746
 747	return 0;
 748}
 749
 750static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 751{
 752	struct dcp *sdcp = global_sdcp;
 753
 754	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 755	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 756	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 757
 758	int ret;
 759
 760	/*
 761	 * Ignore requests that have no data in them and are not
 762	 * the trailing requests in the stream of requests.
 763	 */
 764	if (!req->nbytes && !fini)
 765		return 0;
 766
 767	mutex_lock(&actx->mutex);
 768
 769	rctx->fini = fini;
 770
 771	if (!actx->hot) {
 772		actx->hot = 1;
 773		rctx->init = 1;
 774	}
 775
 776	spin_lock(&sdcp->lock[actx->chan]);
 777	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 778	spin_unlock(&sdcp->lock[actx->chan]);
 779
 780	wake_up_process(sdcp->thread[actx->chan]);
 781	mutex_unlock(&actx->mutex);
 782
 783	return ret;
 784}
 785
 786static int dcp_sha_update(struct ahash_request *req)
 787{
 788	return dcp_sha_update_fx(req, 0);
 789}
 790
 791static int dcp_sha_final(struct ahash_request *req)
 792{
 793	ahash_request_set_crypt(req, NULL, req->result, 0);
 794	req->nbytes = 0;
 795	return dcp_sha_update_fx(req, 1);
 796}
 797
 798static int dcp_sha_finup(struct ahash_request *req)
 799{
 800	return dcp_sha_update_fx(req, 1);
 801}
 802
 803static int dcp_sha_digest(struct ahash_request *req)
 804{
 805	int ret;
 806
 807	ret = dcp_sha_init(req);
 808	if (ret)
 809		return ret;
 810
 811	return dcp_sha_finup(req);
 812}
 813
 814static int dcp_sha_import(struct ahash_request *req, const void *in)
 815{
 816	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 817	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 818	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 819	const struct dcp_export_state *export = in;
 820
 821	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
 822	memset(actx, 0, sizeof(struct dcp_async_ctx));
 823	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
 824	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
 825
 826	return 0;
 827}
 828
 829static int dcp_sha_export(struct ahash_request *req, void *out)
 830{
 831	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
 832	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 833	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
 834	struct dcp_export_state *export = out;
 835
 836	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
 837	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
 838
 839	return 0;
 840}
 841
 842static int dcp_sha_cra_init(struct crypto_tfm *tfm)
 843{
 844	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 845				 sizeof(struct dcp_sha_req_ctx));
 846	return 0;
 847}
 848
 849static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
 850{
 851}
 852
 853/* AES 128 ECB and AES 128 CBC */
 854static struct skcipher_alg dcp_aes_algs[] = {
 855	{
 856		.base.cra_name		= "ecb(aes)",
 857		.base.cra_driver_name	= "ecb-aes-dcp",
 858		.base.cra_priority	= 400,
 859		.base.cra_alignmask	= 15,
 860		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 861					  CRYPTO_ALG_NEED_FALLBACK,
 862		.base.cra_blocksize	= AES_BLOCK_SIZE,
 863		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
 864		.base.cra_module	= THIS_MODULE,
 865
 866		.min_keysize		= AES_MIN_KEY_SIZE,
 867		.max_keysize		= AES_MAX_KEY_SIZE,
 868		.setkey			= mxs_dcp_aes_setkey,
 869		.encrypt		= mxs_dcp_aes_ecb_encrypt,
 870		.decrypt		= mxs_dcp_aes_ecb_decrypt,
 871		.init			= mxs_dcp_aes_fallback_init_tfm,
 872		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 873	}, {
 874		.base.cra_name		= "cbc(aes)",
 875		.base.cra_driver_name	= "cbc-aes-dcp",
 876		.base.cra_priority	= 400,
 877		.base.cra_alignmask	= 15,
 878		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 879					  CRYPTO_ALG_NEED_FALLBACK,
 880		.base.cra_blocksize	= AES_BLOCK_SIZE,
 881		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
 882		.base.cra_module	= THIS_MODULE,
 883
 884		.min_keysize		= AES_MIN_KEY_SIZE,
 885		.max_keysize		= AES_MAX_KEY_SIZE,
 886		.setkey			= mxs_dcp_aes_setkey,
 887		.encrypt		= mxs_dcp_aes_cbc_encrypt,
 888		.decrypt		= mxs_dcp_aes_cbc_decrypt,
 889		.ivsize			= AES_BLOCK_SIZE,
 890		.init			= mxs_dcp_aes_fallback_init_tfm,
 891		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 892	},
 893};
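/*
 * Illustrative sketch, not part of the driver: once "ecb-aes-dcp" and
 * "cbc-aes-dcp" are registered above, other kernel code reaches them through
 * the generic skcipher API.  The function below is a hypothetical example
 * (its name, key and data are placeholders); it assumes <crypto/skcipher.h>
 * and <linux/scatterlist.h>, and a real caller would hand the scatterlist
 * kmalloc'd memory rather than stack buffers as shown here.
 */
static int dcp_cbc_aes_example(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };
	u8 iv[AES_BLOCK_SIZE] = { 0 };
	u8 buf[AES_BLOCK_SIZE] = { 0 };
	int ret;

	/* "cbc(aes)" resolves to the highest-priority implementation;
	 * with cra_priority 400 that is cbc-aes-dcp on these SoCs. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);

	/* Turn the asynchronous completion into a synchronous result. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}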
 894
 895/* SHA1 */
 896static struct ahash_alg dcp_sha1_alg = {
 897	.init	= dcp_sha_init,
 898	.update	= dcp_sha_update,
 899	.final	= dcp_sha_final,
 900	.finup	= dcp_sha_finup,
 901	.digest	= dcp_sha_digest,
 902	.import = dcp_sha_import,
 903	.export = dcp_sha_export,
 904	.halg	= {
 905		.digestsize	= SHA1_DIGEST_SIZE,
 906		.statesize	= sizeof(struct dcp_export_state),
 907		.base		= {
 908			.cra_name		= "sha1",
 909			.cra_driver_name	= "sha1-dcp",
 910			.cra_priority		= 400,
 911			.cra_flags		= CRYPTO_ALG_ASYNC,
 912			.cra_blocksize		= SHA1_BLOCK_SIZE,
 913			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 914			.cra_module		= THIS_MODULE,
 915			.cra_init		= dcp_sha_cra_init,
 916			.cra_exit		= dcp_sha_cra_exit,
 917		},
 918	},
 919};
 920
 921/* SHA256 */
 922static struct ahash_alg dcp_sha256_alg = {
 923	.init	= dcp_sha_init,
 924	.update	= dcp_sha_update,
 925	.final	= dcp_sha_final,
 926	.finup	= dcp_sha_finup,
 927	.digest	= dcp_sha_digest,
 928	.import = dcp_sha_import,
 929	.export = dcp_sha_export,
 930	.halg	= {
 931		.digestsize	= SHA256_DIGEST_SIZE,
 932		.statesize	= sizeof(struct dcp_export_state),
 933		.base		= {
 934			.cra_name		= "sha256",
 935			.cra_driver_name	= "sha256-dcp",
 936			.cra_priority		= 400,
 937			.cra_flags		= CRYPTO_ALG_ASYNC,
 938			.cra_blocksize		= SHA256_BLOCK_SIZE,
 939			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 940			.cra_module		= THIS_MODULE,
 941			.cra_init		= dcp_sha_cra_init,
 942			.cra_exit		= dcp_sha_cra_exit,
 943		},
 944	},
 945};
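/*
 * Illustrative sketch, not part of the driver: the ahash algorithms
 * registered above ("sha1-dcp", "sha256-dcp") are used through the generic
 * ahash API in the same way.  Hypothetical example; data must point to
 * memory that is valid for a scatterlist and digest must hold at least
 * SHA256_DIGEST_SIZE bytes.
 */
static int dcp_sha256_example(const void *data, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* digest() runs init/update/final in one shot; wait for the result. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}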
 946
 947static irqreturn_t mxs_dcp_irq(int irq, void *context)
 948{
 949	struct dcp *sdcp = context;
 950	uint32_t stat;
 951	int i;
 952
 953	stat = readl(sdcp->base + MXS_DCP_STAT);
 954	stat &= MXS_DCP_STAT_IRQ_MASK;
 955	if (!stat)
 956		return IRQ_NONE;
 957
 958	/* Clear the interrupts. */
 959	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
 960
 961	/* Complete the DMA requests that finished. */
 962	for (i = 0; i < DCP_MAX_CHANS; i++)
 963		if (stat & (1 << i))
 964			complete(&sdcp->completion[i]);
 965
 966	return IRQ_HANDLED;
 967}
 968
 969static int mxs_dcp_probe(struct platform_device *pdev)
 970{
 971	struct device *dev = &pdev->dev;
 972	struct dcp *sdcp = NULL;
 973	int i, ret;
 974	int dcp_vmi_irq, dcp_irq;
 975
 976	if (global_sdcp) {
 977		dev_err(dev, "Only one DCP instance allowed!\n");
 978		return -ENODEV;
 979	}
 980
 981	dcp_vmi_irq = platform_get_irq(pdev, 0);
 982	if (dcp_vmi_irq < 0)
 983		return dcp_vmi_irq;
 984
 985	dcp_irq = platform_get_irq(pdev, 1);
 986	if (dcp_irq < 0)
 987		return dcp_irq;
 988
 989	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
 990	if (!sdcp)
 991		return -ENOMEM;
 992
 993	sdcp->dev = dev;
 994	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
 995	if (IS_ERR(sdcp->base))
 996		return PTR_ERR(sdcp->base);
 997
 998
 999	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
1000			       "dcp-vmi-irq", sdcp);
1001	if (ret) {
1002		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
1003		return ret;
1004	}
1005
1006	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
1007			       "dcp-irq", sdcp);
1008	if (ret) {
1009		dev_err(dev, "Failed to claim DCP IRQ!\n");
1010		return ret;
1011	}
1012
1013	/* Allocate coherent helper block. */
1014	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
1015				   GFP_KERNEL);
1016	if (!sdcp->coh)
1017		return -ENOMEM;
1018
1019	/* Re-align the structure so it fits the DCP constraints. */
1020	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
1021
1022	/* DCP clock is optional, only used on some SOCs */
1023	sdcp->dcp_clk = devm_clk_get_optional_enabled(dev, "dcp");
1024	if (IS_ERR(sdcp->dcp_clk))
1025		return PTR_ERR(sdcp->dcp_clk);
1026
1027	/* Restart the DCP block. */
1028	ret = stmp_reset_block(sdcp->base);
1029	if (ret) {
1030		dev_err(dev, "Failed reset\n");
1031		return ret;
1032	}
1033
1034	/* Initialize control register. */
1035	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
1036	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
1037	       sdcp->base + MXS_DCP_CTRL);
1038
1039	/* Enable all DCP DMA channels. */
1040	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
1041	       sdcp->base + MXS_DCP_CHANNELCTRL);
1042
1043	/*
1044	 * We do not enable context switching. Give the context buffer a
1045	 * pointer to an illegal address so if context switching is
1046	 * inadvertently enabled, the DCP will return an error instead of
1047	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
1048	 * address will do.
1049	 */
1050	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
1051	for (i = 0; i < DCP_MAX_CHANS; i++)
1052		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
1053	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
1054
1055	global_sdcp = sdcp;
1056
1057	platform_set_drvdata(pdev, sdcp);
1058
1059	for (i = 0; i < DCP_MAX_CHANS; i++) {
1060		spin_lock_init(&sdcp->lock[i]);
1061		init_completion(&sdcp->completion[i]);
1062		crypto_init_queue(&sdcp->queue[i], 50);
1063	}
1064
1065	/* Create the SHA and AES handler threads. */
1066	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
1067						      NULL, "mxs_dcp_chan/sha");
1068	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
1069		dev_err(dev, "Error starting SHA thread!\n");
1070		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
1071		return ret;
1072	}
1073
1074	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
1075						    NULL, "mxs_dcp_chan/aes");
1076	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
1077		dev_err(dev, "Error starting AES thread!\n");
1078		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
1079		goto err_destroy_sha_thread;
1080	}
1081
1082	/* Register the various crypto algorithms. */
1083	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
1084
1085	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
1086		ret = crypto_register_skciphers(dcp_aes_algs,
1087						ARRAY_SIZE(dcp_aes_algs));
1088		if (ret) {
1089			/* Failed to register algorithm. */
1090			dev_err(dev, "Failed to register AES crypto!\n");
1091			goto err_destroy_aes_thread;
1092		}
1093	}
1094
1095	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
1096		ret = crypto_register_ahash(&dcp_sha1_alg);
1097		if (ret) {
1098			dev_err(dev, "Failed to register %s hash!\n",
1099				dcp_sha1_alg.halg.base.cra_name);
1100			goto err_unregister_aes;
1101		}
1102	}
1103
1104	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
1105		ret = crypto_register_ahash(&dcp_sha256_alg);
1106		if (ret) {
1107			dev_err(dev, "Failed to register %s hash!\n",
1108				dcp_sha256_alg.halg.base.cra_name);
1109			goto err_unregister_sha1;
1110		}
1111	}
1112
1113	return 0;
1114
1115err_unregister_sha1:
1116	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1117		crypto_unregister_ahash(&dcp_sha1_alg);
1118
1119err_unregister_aes:
1120	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1121		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1122
1123err_destroy_aes_thread:
1124	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1125
1126err_destroy_sha_thread:
1127	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1128
1129	return ret;
1130}
1131
1132static void mxs_dcp_remove(struct platform_device *pdev)
1133{
1134	struct dcp *sdcp = platform_get_drvdata(pdev);
1135
1136	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
1137		crypto_unregister_ahash(&dcp_sha256_alg);
1138
1139	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1140		crypto_unregister_ahash(&dcp_sha1_alg);
1141
1142	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1143		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1144
1145	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1146	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1147
1148	platform_set_drvdata(pdev, NULL);
1149
1150	global_sdcp = NULL;
1151}
1152
1153static const struct of_device_id mxs_dcp_dt_ids[] = {
1154	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
1155	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
1156	{ /* sentinel */ }
1157};
1158
1159MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
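/*
 * Illustrative device tree node, not taken from this file: a DCP instance
 * binds with one of the compatible strings in mxs_dcp_dt_ids above.  The
 * unit address and interrupt numbers below are placeholders only; the
 * authoritative values live in the SoC .dtsi and the DCP binding under
 * Documentation/devicetree/bindings/crypto/.
 *
 *	dcp: crypto@80028000 {
 *		compatible = "fsl,imx28-dcp";
 *		reg = <0x80028000 0x2000>;
 *		interrupts = <52 53 54>;
 *	};
 */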
1160
1161static struct platform_driver mxs_dcp_driver = {
1162	.probe	= mxs_dcp_probe,
1163	.remove_new = mxs_dcp_remove,
1164	.driver	= {
1165		.name		= "mxs-dcp",
1166		.of_match_table	= mxs_dcp_dt_ids,
1167	},
1168};
1169
1170module_platform_driver(mxs_dcp_driver);
1171
1172MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
1173MODULE_DESCRIPTION("Freescale MXS DCP Driver");
1174MODULE_LICENSE("GPL");
1175MODULE_ALIAS("platform:mxs-dcp");
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Freescale i.MX23/i.MX28 Data Co-Processor driver
   4 *
   5 * Copyright (C) 2013 Marek Vasut <marex@denx.de>
   6 */
   7
   8#include <linux/dma-mapping.h>
   9#include <linux/interrupt.h>
  10#include <linux/io.h>
  11#include <linux/kernel.h>
  12#include <linux/kthread.h>
  13#include <linux/module.h>
  14#include <linux/of.h>
  15#include <linux/platform_device.h>
  16#include <linux/stmp_device.h>
  17#include <linux/clk.h>
  18
  19#include <crypto/aes.h>
  20#include <crypto/sha.h>
 
  21#include <crypto/internal/hash.h>
  22#include <crypto/internal/skcipher.h>
  23#include <crypto/scatterwalk.h>
  24
  25#define DCP_MAX_CHANS	4
  26#define DCP_BUF_SZ	PAGE_SIZE
  27#define DCP_SHA_PAY_SZ  64
  28
  29#define DCP_ALIGNMENT	64
  30
  31/*
  32 * Null hashes to align with hw behavior on imx6sl and ull
  33 * these are flipped for consistency with hw output
  34 */
  35static const uint8_t sha1_null_hash[] =
  36	"\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
  37	"\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
  38
  39static const uint8_t sha256_null_hash[] =
  40	"\x55\xb8\x52\x78\x1b\x99\x95\xa4"
  41	"\x4c\x93\x9b\x64\xe4\x41\xae\x27"
  42	"\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
  43	"\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
  44
  45/* DCP DMA descriptor. */
  46struct dcp_dma_desc {
  47	uint32_t	next_cmd_addr;
  48	uint32_t	control0;
  49	uint32_t	control1;
  50	uint32_t	source;
  51	uint32_t	destination;
  52	uint32_t	size;
  53	uint32_t	payload;
  54	uint32_t	status;
  55};
  56
  57/* Coherent aligned block for bounce buffering. */
  58struct dcp_coherent_block {
  59	uint8_t			aes_in_buf[DCP_BUF_SZ];
  60	uint8_t			aes_out_buf[DCP_BUF_SZ];
  61	uint8_t			sha_in_buf[DCP_BUF_SZ];
  62	uint8_t			sha_out_buf[DCP_SHA_PAY_SZ];
  63
  64	uint8_t			aes_key[2 * AES_KEYSIZE_128];
  65
  66	struct dcp_dma_desc	desc[DCP_MAX_CHANS];
  67};
  68
  69struct dcp {
  70	struct device			*dev;
  71	void __iomem			*base;
  72
  73	uint32_t			caps;
  74
  75	struct dcp_coherent_block	*coh;
  76
  77	struct completion		completion[DCP_MAX_CHANS];
  78	spinlock_t			lock[DCP_MAX_CHANS];
  79	struct task_struct		*thread[DCP_MAX_CHANS];
  80	struct crypto_queue		queue[DCP_MAX_CHANS];
  81	struct clk			*dcp_clk;
  82};
  83
  84enum dcp_chan {
  85	DCP_CHAN_HASH_SHA	= 0,
  86	DCP_CHAN_CRYPTO		= 2,
  87};
  88
  89struct dcp_async_ctx {
  90	/* Common context */
  91	enum dcp_chan	chan;
  92	uint32_t	fill;
  93
  94	/* SHA Hash-specific context */
  95	struct mutex			mutex;
  96	uint32_t			alg;
  97	unsigned int			hot:1;
  98
  99	/* Crypto-specific context */
 100	struct crypto_skcipher		*fallback;
 101	unsigned int			key_len;
 102	uint8_t				key[AES_KEYSIZE_128];
 103};
 104
 105struct dcp_aes_req_ctx {
 106	unsigned int	enc:1;
 107	unsigned int	ecb:1;
 108	struct skcipher_request fallback_req;	// keep at the end
 109};
 110
 111struct dcp_sha_req_ctx {
 112	unsigned int	init:1;
 113	unsigned int	fini:1;
 114};
 115
 116struct dcp_export_state {
 117	struct dcp_sha_req_ctx req_ctx;
 118	struct dcp_async_ctx async_ctx;
 119};
 120
 121/*
 122 * There can even be only one instance of the MXS DCP due to the
 123 * design of Linux Crypto API.
 124 */
 125static struct dcp *global_sdcp;
 126
 127/* DCP register layout. */
 128#define MXS_DCP_CTRL				0x00
 129#define MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES	(1 << 23)
 130#define MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING	(1 << 22)
 131
 132#define MXS_DCP_STAT				0x10
 133#define MXS_DCP_STAT_CLR			0x18
 134#define MXS_DCP_STAT_IRQ_MASK			0xf
 135
 136#define MXS_DCP_CHANNELCTRL			0x20
 137#define MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK	0xff
 138
 139#define MXS_DCP_CAPABILITY1			0x40
 140#define MXS_DCP_CAPABILITY1_SHA256		(4 << 16)
 141#define MXS_DCP_CAPABILITY1_SHA1		(1 << 16)
 142#define MXS_DCP_CAPABILITY1_AES128		(1 << 0)
 143
 144#define MXS_DCP_CONTEXT				0x50
 145
 146#define MXS_DCP_CH_N_CMDPTR(n)			(0x100 + ((n) * 0x40))
 147
 148#define MXS_DCP_CH_N_SEMA(n)			(0x110 + ((n) * 0x40))
 149
 150#define MXS_DCP_CH_N_STAT(n)			(0x120 + ((n) * 0x40))
 151#define MXS_DCP_CH_N_STAT_CLR(n)		(0x128 + ((n) * 0x40))
 152
 153/* DMA descriptor bits. */
 154#define MXS_DCP_CONTROL0_HASH_TERM		(1 << 13)
 155#define MXS_DCP_CONTROL0_HASH_INIT		(1 << 12)
 156#define MXS_DCP_CONTROL0_PAYLOAD_KEY		(1 << 11)
 157#define MXS_DCP_CONTROL0_CIPHER_ENCRYPT		(1 << 8)
 158#define MXS_DCP_CONTROL0_CIPHER_INIT		(1 << 9)
 159#define MXS_DCP_CONTROL0_ENABLE_HASH		(1 << 6)
 160#define MXS_DCP_CONTROL0_ENABLE_CIPHER		(1 << 5)
 161#define MXS_DCP_CONTROL0_DECR_SEMAPHORE		(1 << 1)
 162#define MXS_DCP_CONTROL0_INTERRUPT		(1 << 0)
 163
 164#define MXS_DCP_CONTROL1_HASH_SELECT_SHA256	(2 << 16)
 165#define MXS_DCP_CONTROL1_HASH_SELECT_SHA1	(0 << 16)
 166#define MXS_DCP_CONTROL1_CIPHER_MODE_CBC	(1 << 4)
 167#define MXS_DCP_CONTROL1_CIPHER_MODE_ECB	(0 << 4)
 168#define MXS_DCP_CONTROL1_CIPHER_SELECT_AES128	(0 << 0)
 169
 170static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
 171{
 
 172	struct dcp *sdcp = global_sdcp;
 173	const int chan = actx->chan;
 174	uint32_t stat;
 175	unsigned long ret;
 176	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 177
 178	dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
 179					      DMA_TO_DEVICE);
 180
 
 
 
 
 181	reinit_completion(&sdcp->completion[chan]);
 182
 183	/* Clear status register. */
 184	writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(chan));
 185
 186	/* Load the DMA descriptor. */
 187	writel(desc_phys, sdcp->base + MXS_DCP_CH_N_CMDPTR(chan));
 188
 189	/* Increment the semaphore to start the DMA transfer. */
 190	writel(1, sdcp->base + MXS_DCP_CH_N_SEMA(chan));
 191
 192	ret = wait_for_completion_timeout(&sdcp->completion[chan],
 193					  msecs_to_jiffies(1000));
 194	if (!ret) {
 195		dev_err(sdcp->dev, "Channel %i timeout (DCP_STAT=0x%08x)\n",
 196			chan, readl(sdcp->base + MXS_DCP_STAT));
 197		return -ETIMEDOUT;
 198	}
 199
 200	stat = readl(sdcp->base + MXS_DCP_CH_N_STAT(chan));
 201	if (stat & 0xff) {
 202		dev_err(sdcp->dev, "Channel %i error (CH_STAT=0x%08x)\n",
 203			chan, stat);
 204		return -EINVAL;
 205	}
 206
 207	dma_unmap_single(sdcp->dev, desc_phys, sizeof(*desc), DMA_TO_DEVICE);
 208
 209	return 0;
 210}
 211
 212/*
 213 * Encryption (AES128)
 214 */
 215static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
 216			   struct skcipher_request *req, int init)
 217{
 
 218	struct dcp *sdcp = global_sdcp;
 219	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 220	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 221	int ret;
 222
 223	dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
 224					     2 * AES_KEYSIZE_128,
 225					     DMA_TO_DEVICE);
 226	dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
 227					     DCP_BUF_SZ, DMA_TO_DEVICE);
 228	dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
 229					     DCP_BUF_SZ, DMA_FROM_DEVICE);
 
 
 
 
 
 
 
 
 
 
 230
 231	if (actx->fill % AES_BLOCK_SIZE) {
 232		dev_err(sdcp->dev, "Invalid block size!\n");
 233		ret = -EINVAL;
 234		goto aes_done_run;
 235	}
 236
 237	/* Fill in the DMA descriptor. */
 238	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 239		    MXS_DCP_CONTROL0_INTERRUPT |
 240		    MXS_DCP_CONTROL0_ENABLE_CIPHER;
 241
 242	/* Payload contains the key. */
 243	desc->control0 |= MXS_DCP_CONTROL0_PAYLOAD_KEY;
 244
 245	if (rctx->enc)
 246		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_ENCRYPT;
 247	if (init)
 248		desc->control0 |= MXS_DCP_CONTROL0_CIPHER_INIT;
 249
 250	desc->control1 = MXS_DCP_CONTROL1_CIPHER_SELECT_AES128;
 251
 252	if (rctx->ecb)
 253		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_ECB;
 254	else
 255		desc->control1 |= MXS_DCP_CONTROL1_CIPHER_MODE_CBC;
 256
 257	desc->next_cmd_addr = 0;
 258	desc->source = src_phys;
 259	desc->destination = dst_phys;
 260	desc->size = actx->fill;
 261	desc->payload = key_phys;
 262	desc->status = 0;
 263
 264	ret = mxs_dcp_start_dma(actx);
 265
 266aes_done_run:
 
 
 
 
 267	dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
 268			 DMA_TO_DEVICE);
 269	dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 270	dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
 271
 272	return ret;
 273}
 274
 275static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
 276{
 277	struct dcp *sdcp = global_sdcp;
 278
 279	struct skcipher_request *req = skcipher_request_cast(arq);
 280	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 281	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 282
 283	struct scatterlist *dst = req->dst;
 284	struct scatterlist *src = req->src;
 285	const int nents = sg_nents(req->src);
 286
 287	const int out_off = DCP_BUF_SZ;
 288	uint8_t *in_buf = sdcp->coh->aes_in_buf;
 289	uint8_t *out_buf = sdcp->coh->aes_out_buf;
 290
 291	uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
 292	uint32_t dst_off = 0;
 
 293	uint32_t last_out_len = 0;
 294
 295	uint8_t *key = sdcp->coh->aes_key;
 296
 297	int ret = 0;
 298	int split = 0;
 299	unsigned int i, len, clen, rem = 0, tlen = 0;
 300	int init = 0;
 301	bool limit_hit = false;
 302
 303	actx->fill = 0;
 304
 305	/* Copy the key from the temporary location. */
 306	memcpy(key, actx->key, actx->key_len);
 307
 308	if (!rctx->ecb) {
 309		/* Copy the CBC IV just past the key. */
 310		memcpy(key + AES_KEYSIZE_128, req->iv, AES_KEYSIZE_128);
 311		/* CBC needs the INIT set. */
 312		init = 1;
 313	} else {
 314		memset(key + AES_KEYSIZE_128, 0, AES_KEYSIZE_128);
 315	}
 316
 317	for_each_sg(req->src, src, nents, i) {
 318		src_buf = sg_virt(src);
 319		len = sg_dma_len(src);
 320		tlen += len;
 321		limit_hit = tlen > req->cryptlen;
 322
 323		if (limit_hit)
 324			len = req->cryptlen - (tlen - len);
 325
 326		do {
 327			if (actx->fill + len > out_off)
 328				clen = out_off - actx->fill;
 329			else
 330				clen = len;
 331
 332			memcpy(in_buf + actx->fill, src_buf, clen);
 333			len -= clen;
 334			src_buf += clen;
 335			actx->fill += clen;
 336
 337			/*
 338			 * If we filled the buffer or this is the last SG,
 339			 * submit the buffer.
 340			 */
 341			if (actx->fill == out_off || sg_is_last(src) ||
 342				limit_hit) {
 343				ret = mxs_dcp_run_aes(actx, req, init);
 344				if (ret)
 345					return ret;
 346				init = 0;
 347
 348				out_tmp = out_buf;
 
 
 349				last_out_len = actx->fill;
 350				while (dst && actx->fill) {
 351					if (!split) {
 352						dst_buf = sg_virt(dst);
 353						dst_off = 0;
 354					}
 355					rem = min(sg_dma_len(dst) - dst_off,
 356						  actx->fill);
 357
 358					memcpy(dst_buf + dst_off, out_tmp, rem);
 359					out_tmp += rem;
 360					dst_off += rem;
 361					actx->fill -= rem;
 362
 363					if (dst_off == sg_dma_len(dst)) {
 364						dst = sg_next(dst);
 365						split = 0;
 366					} else {
 367						split = 1;
 368					}
 369				}
 370			}
 371		} while (len);
 372
 373		if (limit_hit)
 374			break;
 375	}
 376
 377	/* Copy the IV for CBC for chaining */
 378	if (!rctx->ecb) {
 379		if (rctx->enc)
 380			memcpy(req->iv, out_buf+(last_out_len-AES_BLOCK_SIZE),
 381				AES_BLOCK_SIZE);
 382		else
 383			memcpy(req->iv, in_buf+(last_out_len-AES_BLOCK_SIZE),
 384				AES_BLOCK_SIZE);
 385	}
 386
 387	return ret;
 388}
 389
 390static int dcp_chan_thread_aes(void *data)
 391{
 392	struct dcp *sdcp = global_sdcp;
 393	const int chan = DCP_CHAN_CRYPTO;
 394
 395	struct crypto_async_request *backlog;
 396	struct crypto_async_request *arq;
 397
 398	int ret;
 399
 400	while (!kthread_should_stop()) {
 401		set_current_state(TASK_INTERRUPTIBLE);
 402
 403		spin_lock(&sdcp->lock[chan]);
 404		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 405		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 406		spin_unlock(&sdcp->lock[chan]);
 407
 408		if (!backlog && !arq) {
 409			schedule();
 410			continue;
 411		}
 412
 413		set_current_state(TASK_RUNNING);
 414
 415		if (backlog)
 416			backlog->complete(backlog, -EINPROGRESS);
 417
 418		if (arq) {
 419			ret = mxs_dcp_aes_block_crypt(arq);
 420			arq->complete(arq, ret);
 421		}
 422	}
 423
 424	return 0;
 425}
 426
 427static int mxs_dcp_block_fallback(struct skcipher_request *req, int enc)
 428{
 429	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 430	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 431	struct dcp_async_ctx *ctx = crypto_skcipher_ctx(tfm);
 432	int ret;
 433
 434	skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
 435	skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
 436				      req->base.complete, req->base.data);
 437	skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
 438				   req->cryptlen, req->iv);
 439
 440	if (enc)
 441		ret = crypto_skcipher_encrypt(&rctx->fallback_req);
 442	else
 443		ret = crypto_skcipher_decrypt(&rctx->fallback_req);
 444
 445	return ret;
 446}
 447
 448static int mxs_dcp_aes_enqueue(struct skcipher_request *req, int enc, int ecb)
 449{
 450	struct dcp *sdcp = global_sdcp;
 451	struct crypto_async_request *arq = &req->base;
 452	struct dcp_async_ctx *actx = crypto_tfm_ctx(arq->tfm);
 453	struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
 454	int ret;
 455
 456	if (unlikely(actx->key_len != AES_KEYSIZE_128))
 457		return mxs_dcp_block_fallback(req, enc);
 458
 459	rctx->enc = enc;
 460	rctx->ecb = ecb;
 461	actx->chan = DCP_CHAN_CRYPTO;
 462
 463	spin_lock(&sdcp->lock[actx->chan]);
 464	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 465	spin_unlock(&sdcp->lock[actx->chan]);
 466
 467	wake_up_process(sdcp->thread[actx->chan]);
 468
 469	return ret;
 470}
 471
 472static int mxs_dcp_aes_ecb_decrypt(struct skcipher_request *req)
 473{
 474	return mxs_dcp_aes_enqueue(req, 0, 1);
 475}
 476
 477static int mxs_dcp_aes_ecb_encrypt(struct skcipher_request *req)
 478{
 479	return mxs_dcp_aes_enqueue(req, 1, 1);
 480}
 481
 482static int mxs_dcp_aes_cbc_decrypt(struct skcipher_request *req)
 483{
 484	return mxs_dcp_aes_enqueue(req, 0, 0);
 485}
 486
 487static int mxs_dcp_aes_cbc_encrypt(struct skcipher_request *req)
 488{
 489	return mxs_dcp_aes_enqueue(req, 1, 0);
 490}
 491
 492static int mxs_dcp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
 493			      unsigned int len)
 494{
 495	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 496
 497	/*
 498	 * AES 128 is supposed by the hardware, store key into temporary
 499	 * buffer and exit. We must use the temporary buffer here, since
 500	 * there can still be an operation in progress.
 501	 */
 502	actx->key_len = len;
 503	if (len == AES_KEYSIZE_128) {
 504		memcpy(actx->key, key, len);
 505		return 0;
 506	}
 507
 508	/*
 509	 * If the requested AES key size is not supported by the hardware,
 510	 * but is supported by in-kernel software implementation, we use
 511	 * software fallback.
 512	 */
 513	crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
 514	crypto_skcipher_set_flags(actx->fallback,
 515				  tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
 516	return crypto_skcipher_setkey(actx->fallback, key, len);
 517}
 518
 519static int mxs_dcp_aes_fallback_init_tfm(struct crypto_skcipher *tfm)
 520{
 521	const char *name = crypto_tfm_alg_name(crypto_skcipher_tfm(tfm));
 522	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 523	struct crypto_skcipher *blk;
 524
 525	blk = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 526	if (IS_ERR(blk))
 527		return PTR_ERR(blk);
 528
 529	actx->fallback = blk;
 530	crypto_skcipher_set_reqsize(tfm, sizeof(struct dcp_aes_req_ctx) +
 531					 crypto_skcipher_reqsize(blk));
 532	return 0;
 533}
 534
 535static void mxs_dcp_aes_fallback_exit_tfm(struct crypto_skcipher *tfm)
 536{
 537	struct dcp_async_ctx *actx = crypto_skcipher_ctx(tfm);
 538
 539	crypto_free_skcipher(actx->fallback);
 540}
 541
 542/*
 543 * Hashing (SHA1/SHA256)
 544 */
 545static int mxs_dcp_run_sha(struct ahash_request *req)
 546{
 547	struct dcp *sdcp = global_sdcp;
 548	int ret;
 549
 550	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 551	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 552	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 553	struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
 554
 555	dma_addr_t digest_phys = 0;
 556	dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
 557					     DCP_BUF_SZ, DMA_TO_DEVICE);
 558
 
 
 
 
 559	/* Fill in the DMA descriptor. */
 560	desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
 561		    MXS_DCP_CONTROL0_INTERRUPT |
 562		    MXS_DCP_CONTROL0_ENABLE_HASH;
 563	if (rctx->init)
 564		desc->control0 |= MXS_DCP_CONTROL0_HASH_INIT;
 565
 566	desc->control1 = actx->alg;
 567	desc->next_cmd_addr = 0;
 568	desc->source = buf_phys;
 569	desc->destination = 0;
 570	desc->size = actx->fill;
 571	desc->payload = 0;
 572	desc->status = 0;
 573
 574	/*
 575	 * Align driver with hw behavior when generating null hashes
 576	 */
 577	if (rctx->init && rctx->fini && desc->size == 0) {
 578		struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 579		const uint8_t *sha_buf =
 580			(actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
 581			sha1_null_hash : sha256_null_hash;
 582		memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
 583		ret = 0;
 584		goto done_run;
 585	}
 586
 587	/* Set HASH_TERM bit for last transfer block. */
 588	if (rctx->fini) {
 589		digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
 590					     DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
 
 
 
 
 591		desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
 592		desc->payload = digest_phys;
 593	}
 594
 595	ret = mxs_dcp_start_dma(actx);
 596
 597	if (rctx->fini)
 598		dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
 599				 DMA_FROM_DEVICE);
 600
 601done_run:
 602	dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
 603
 604	return ret;
 605}
 606
 607static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
 608{
 609	struct dcp *sdcp = global_sdcp;
 610
 611	struct ahash_request *req = ahash_request_cast(arq);
 612	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 613	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 614	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 615	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 616
 617	uint8_t *in_buf = sdcp->coh->sha_in_buf;
 618	uint8_t *out_buf = sdcp->coh->sha_out_buf;
 619
 620	struct scatterlist *src;
 621
 622	unsigned int i, len, clen, oft = 0;
 623	int ret;
 624
 625	int fin = rctx->fini;
 626	if (fin)
 627		rctx->fini = 0;
 628
 629	src = req->src;
 630	len = req->nbytes;
 631
 632	while (len) {
 633		if (actx->fill + len > DCP_BUF_SZ)
 634			clen = DCP_BUF_SZ - actx->fill;
 635		else
 636			clen = len;
 637
 638		scatterwalk_map_and_copy(in_buf + actx->fill, src, oft, clen,
 639					 0);
 640
 641		len -= clen;
 642		oft += clen;
 643		actx->fill += clen;
 644
 645		/*
 646		 * If we filled the buffer and still have some
 647		 * more data, submit the buffer.
 648		 */
 649		if (len && actx->fill == DCP_BUF_SZ) {
 650			ret = mxs_dcp_run_sha(req);
 651			if (ret)
 652				return ret;
 653			actx->fill = 0;
 654			rctx->init = 0;
 655		}
 656	}
 657
 658	if (fin) {
 659		rctx->fini = 1;
 660
 661		/* Submit whatever is left. */
 662		if (!req->result)
 663			return -EINVAL;
 664
 665		ret = mxs_dcp_run_sha(req);
 666		if (ret)
 667			return ret;
 668
 669		actx->fill = 0;
 670
 671		/* For some reason the result is flipped */
 672		for (i = 0; i < halg->digestsize; i++)
 673			req->result[i] = out_buf[halg->digestsize - i - 1];
 674	}
 675
 676	return 0;
 677}
 678
 679static int dcp_chan_thread_sha(void *data)
 680{
 681	struct dcp *sdcp = global_sdcp;
 682	const int chan = DCP_CHAN_HASH_SHA;
 683
 684	struct crypto_async_request *backlog;
 685	struct crypto_async_request *arq;
 686	int ret;
 687
 688	while (!kthread_should_stop()) {
 689		set_current_state(TASK_INTERRUPTIBLE);
 690
 691		spin_lock(&sdcp->lock[chan]);
 692		backlog = crypto_get_backlog(&sdcp->queue[chan]);
 693		arq = crypto_dequeue_request(&sdcp->queue[chan]);
 694		spin_unlock(&sdcp->lock[chan]);
 695
 696		if (!backlog && !arq) {
 697			schedule();
 698			continue;
 699		}
 700
 701		set_current_state(TASK_RUNNING);
 702
 703		if (backlog)
 704			backlog->complete(backlog, -EINPROGRESS);
 705
 706		if (arq) {
 707			ret = dcp_sha_req_to_buf(arq);
 708			arq->complete(arq, ret);
 709		}
 710	}
 711
 712	return 0;
 713}
 714
 715static int dcp_sha_init(struct ahash_request *req)
 716{
 717	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 718	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 719
 720	struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
 721
 722	/*
 723	 * Start hashing session. The code below only inits the
 724	 * hashing session context, nothing more.
 725	 */
 726	memset(actx, 0, sizeof(*actx));
 727
 728	if (strcmp(halg->base.cra_name, "sha1") == 0)
 729		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA1;
 730	else
 731		actx->alg = MXS_DCP_CONTROL1_HASH_SELECT_SHA256;
 732
 733	actx->fill = 0;
 734	actx->hot = 0;
 735	actx->chan = DCP_CHAN_HASH_SHA;
 736
 737	mutex_init(&actx->mutex);
 738
 739	return 0;
 740}
 741
 742static int dcp_sha_update_fx(struct ahash_request *req, int fini)
 743{
 744	struct dcp *sdcp = global_sdcp;
 745
 746	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 747	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 748	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 749
 750	int ret;
 751
 752	/*
 753	 * Ignore requests that have no data in them and are not
 754	 * the trailing requests in the stream of requests.
 755	 */
 756	if (!req->nbytes && !fini)
 757		return 0;
 758
 759	mutex_lock(&actx->mutex);
 760
 761	rctx->fini = fini;
 762
 763	if (!actx->hot) {
 764		actx->hot = 1;
 765		rctx->init = 1;
 766	}
 767
 768	spin_lock(&sdcp->lock[actx->chan]);
 769	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
 770	spin_unlock(&sdcp->lock[actx->chan]);
 771
 772	wake_up_process(sdcp->thread[actx->chan]);
 773	mutex_unlock(&actx->mutex);
 774
 775	return ret;
 776}
 777
 778static int dcp_sha_update(struct ahash_request *req)
 779{
 780	return dcp_sha_update_fx(req, 0);
 781}
 782
 783static int dcp_sha_final(struct ahash_request *req)
 784{
 785	ahash_request_set_crypt(req, NULL, req->result, 0);
 786	req->nbytes = 0;
 787	return dcp_sha_update_fx(req, 1);
 788}
 789
 790static int dcp_sha_finup(struct ahash_request *req)
 791{
 792	return dcp_sha_update_fx(req, 1);
 793}
 794
 795static int dcp_sha_digest(struct ahash_request *req)
 796{
 797	int ret;
 798
 799	ret = dcp_sha_init(req);
 800	if (ret)
 801		return ret;
 802
 803	return dcp_sha_finup(req);
 804}
 805
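/*
 * Export and import simply copy the software request and transform
 * contexts to and from struct dcp_export_state; no hardware state is
 * serialized.
 */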
 806static int dcp_sha_import(struct ahash_request *req, const void *in)
 807{
 808	struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
 809	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 810	struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
 811	const struct dcp_export_state *export = in;
 812
 813	memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
 814	memset(actx, 0, sizeof(struct dcp_async_ctx));
 815	memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
 816	memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
 817
 818	return 0;
 819}
 820
 821static int dcp_sha_export(struct ahash_request *req, void *out)
 822{
 823	struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
 824	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 825	struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
 826	struct dcp_export_state *export = out;
 827
 828	memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
 829	memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
 830
 831	return 0;
 832}
 833
 834static int dcp_sha_cra_init(struct crypto_tfm *tfm)
 835{
 836	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 837				 sizeof(struct dcp_sha_req_ctx));
 838	return 0;
 839}
 840
 841static void dcp_sha_cra_exit(struct crypto_tfm *tfm)
 842{
 843}
 844
 845/* AES 128 ECB and AES 128 CBC */
 846static struct skcipher_alg dcp_aes_algs[] = {
 847	{
 848		.base.cra_name		= "ecb(aes)",
 849		.base.cra_driver_name	= "ecb-aes-dcp",
 850		.base.cra_priority	= 400,
 851		.base.cra_alignmask	= 15,
 852		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 853					  CRYPTO_ALG_NEED_FALLBACK,
 854		.base.cra_blocksize	= AES_BLOCK_SIZE,
 855		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
 856		.base.cra_module	= THIS_MODULE,
 857
 858		.min_keysize		= AES_MIN_KEY_SIZE,
 859		.max_keysize		= AES_MAX_KEY_SIZE,
 860		.setkey			= mxs_dcp_aes_setkey,
 861		.encrypt		= mxs_dcp_aes_ecb_encrypt,
 862		.decrypt		= mxs_dcp_aes_ecb_decrypt,
 863		.init			= mxs_dcp_aes_fallback_init_tfm,
 864		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 865	}, {
 866		.base.cra_name		= "cbc(aes)",
 867		.base.cra_driver_name	= "cbc-aes-dcp",
 868		.base.cra_priority	= 400,
 869		.base.cra_alignmask	= 15,
 870		.base.cra_flags		= CRYPTO_ALG_ASYNC |
 871					  CRYPTO_ALG_NEED_FALLBACK,
 872		.base.cra_blocksize	= AES_BLOCK_SIZE,
 873		.base.cra_ctxsize	= sizeof(struct dcp_async_ctx),
 874		.base.cra_module	= THIS_MODULE,
 875
 876		.min_keysize		= AES_MIN_KEY_SIZE,
 877		.max_keysize		= AES_MAX_KEY_SIZE,
 878		.setkey			= mxs_dcp_aes_setkey,
 879		.encrypt		= mxs_dcp_aes_cbc_encrypt,
 880		.decrypt		= mxs_dcp_aes_cbc_decrypt,
 881		.ivsize			= AES_BLOCK_SIZE,
 882		.init			= mxs_dcp_aes_fallback_init_tfm,
 883		.exit			= mxs_dcp_aes_fallback_exit_tfm,
 884	},
 885};
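
/*
 * Illustrative only, not part of the driver: a minimal sketch of how an
 * in-kernel caller might exercise the "cbc(aes)" implementation registered
 * above through the generic skcipher API (the crypto core picks the
 * highest-priority provider, which may or may not be this driver).  The
 * buffer names and sizes are assumptions made for the example, and error
 * handling beyond the allocation check is elided.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 key[AES_KEYSIZE_128], iv[AES_BLOCK_SIZE], buf[AES_BLOCK_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	sg_init_one(&sg, buf, sizeof(buf));
 *	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */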
 886
 887/* SHA1 */
 888static struct ahash_alg dcp_sha1_alg = {
 889	.init	= dcp_sha_init,
 890	.update	= dcp_sha_update,
 891	.final	= dcp_sha_final,
 892	.finup	= dcp_sha_finup,
 893	.digest	= dcp_sha_digest,
 894	.import = dcp_sha_import,
 895	.export = dcp_sha_export,
 896	.halg	= {
 897		.digestsize	= SHA1_DIGEST_SIZE,
 898		.statesize	= sizeof(struct dcp_export_state),
 899		.base		= {
 900			.cra_name		= "sha1",
 901			.cra_driver_name	= "sha1-dcp",
 902			.cra_priority		= 400,
 903			.cra_alignmask		= 63,
 904			.cra_flags		= CRYPTO_ALG_ASYNC,
 905			.cra_blocksize		= SHA1_BLOCK_SIZE,
 906			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 907			.cra_module		= THIS_MODULE,
 908			.cra_init		= dcp_sha_cra_init,
 909			.cra_exit		= dcp_sha_cra_exit,
 910		},
 911	},
 912};
 913
 914/* SHA256 */
 915static struct ahash_alg dcp_sha256_alg = {
 916	.init	= dcp_sha_init,
 917	.update	= dcp_sha_update,
 918	.final	= dcp_sha_final,
 919	.finup	= dcp_sha_finup,
 920	.digest	= dcp_sha_digest,
 921	.import = dcp_sha_import,
 922	.export = dcp_sha_export,
 923	.halg	= {
 924		.digestsize	= SHA256_DIGEST_SIZE,
 925		.statesize	= sizeof(struct dcp_export_state),
 926		.base		= {
 927			.cra_name		= "sha256",
 928			.cra_driver_name	= "sha256-dcp",
 929			.cra_priority		= 400,
 930			.cra_alignmask		= 63,
 931			.cra_flags		= CRYPTO_ALG_ASYNC,
 932			.cra_blocksize		= SHA256_BLOCK_SIZE,
 933			.cra_ctxsize		= sizeof(struct dcp_async_ctx),
 934			.cra_module		= THIS_MODULE,
 935			.cra_init		= dcp_sha_cra_init,
 936			.cra_exit		= dcp_sha_cra_exit,
 937		},
 938	},
 939};
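
/*
 * Illustrative only, not part of the driver: a minimal sketch of an
 * in-kernel caller computing a digest through the "sha256" ahash above
 * (again, the crypto core chooses the provider by priority).  The data
 * buffer is an assumption made for the example; error handling is
 * largely elided.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	u8 data[64], digest[SHA256_DIGEST_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	sg_init_one(&sg, data, sizeof(data));
 *	ahash_request_set_crypt(req, &sg, digest, sizeof(data));
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */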
 940
 941static irqreturn_t mxs_dcp_irq(int irq, void *context)
 942{
 943	struct dcp *sdcp = context;
 944	uint32_t stat;
 945	int i;
 946
 947	stat = readl(sdcp->base + MXS_DCP_STAT);
 948	stat &= MXS_DCP_STAT_IRQ_MASK;
 949	if (!stat)
 950		return IRQ_NONE;
 951
 952	/* Clear the interrupts. */
 953	writel(stat, sdcp->base + MXS_DCP_STAT_CLR);
 954
 955	/* Complete the DMA requests that finished. */
 956	for (i = 0; i < DCP_MAX_CHANS; i++)
 957		if (stat & (1 << i))
 958			complete(&sdcp->completion[i]);
 959
 960	return IRQ_HANDLED;
 961}
 962
 963static int mxs_dcp_probe(struct platform_device *pdev)
 964{
 965	struct device *dev = &pdev->dev;
 966	struct dcp *sdcp = NULL;
 967	int i, ret;
 968	int dcp_vmi_irq, dcp_irq;
 969
 970	if (global_sdcp) {
 971		dev_err(dev, "Only one DCP instance allowed!\n");
 972		return -ENODEV;
 973	}
 974
 975	dcp_vmi_irq = platform_get_irq(pdev, 0);
 976	if (dcp_vmi_irq < 0)
 977		return dcp_vmi_irq;
 978
 979	dcp_irq = platform_get_irq(pdev, 1);
 980	if (dcp_irq < 0)
 981		return dcp_irq;
 982
 983	sdcp = devm_kzalloc(dev, sizeof(*sdcp), GFP_KERNEL);
 984	if (!sdcp)
 985		return -ENOMEM;
 986
 987	sdcp->dev = dev;
 988	sdcp->base = devm_platform_ioremap_resource(pdev, 0);
 989	if (IS_ERR(sdcp->base))
 990		return PTR_ERR(sdcp->base);
 991
 992
 993	ret = devm_request_irq(dev, dcp_vmi_irq, mxs_dcp_irq, 0,
 994			       "dcp-vmi-irq", sdcp);
 995	if (ret) {
 996		dev_err(dev, "Failed to claim DCP VMI IRQ!\n");
 997		return ret;
 998	}
 999
1000	ret = devm_request_irq(dev, dcp_irq, mxs_dcp_irq, 0,
1001			       "dcp-irq", sdcp);
1002	if (ret) {
1003		dev_err(dev, "Failed to claim DCP IRQ!\n");
1004		return ret;
1005	}
1006
1007	/* Allocate coherent helper block. */
1008	sdcp->coh = devm_kzalloc(dev, sizeof(*sdcp->coh) + DCP_ALIGNMENT,
1009				   GFP_KERNEL);
1010	if (!sdcp->coh)
1011		return -ENOMEM;
1012
1013	/* Re-align the structure so it fits the DCP constraints. */
1014	sdcp->coh = PTR_ALIGN(sdcp->coh, DCP_ALIGNMENT);
1015
 1016	/* DCP clock is optional, only used on some SoCs */
1017	sdcp->dcp_clk = devm_clk_get(dev, "dcp");
1018	if (IS_ERR(sdcp->dcp_clk)) {
1019		if (sdcp->dcp_clk != ERR_PTR(-ENOENT))
1020			return PTR_ERR(sdcp->dcp_clk);
1021		sdcp->dcp_clk = NULL;
1022	}
1023	ret = clk_prepare_enable(sdcp->dcp_clk);
1024	if (ret)
1025		return ret;
1026
1027	/* Restart the DCP block. */
1028	ret = stmp_reset_block(sdcp->base);
1029	if (ret) {
1030		dev_err(dev, "Failed reset\n");
1031		goto err_disable_unprepare_clk;
1032	}
1033
1034	/* Initialize control register. */
1035	writel(MXS_DCP_CTRL_GATHER_RESIDUAL_WRITES |
1036	       MXS_DCP_CTRL_ENABLE_CONTEXT_CACHING | 0xf,
1037	       sdcp->base + MXS_DCP_CTRL);
1038
1039	/* Enable all DCP DMA channels. */
1040	writel(MXS_DCP_CHANNELCTRL_ENABLE_CHANNEL_MASK,
1041	       sdcp->base + MXS_DCP_CHANNELCTRL);
1042
1043	/*
1044	 * We do not enable context switching. Give the context buffer a
1045	 * pointer to an illegal address so if context switching is
 1046	 * inadvertently enabled, the DCP will return an error instead of
1047	 * trashing good memory. The DCP DMA cannot access ROM, so any ROM
1048	 * address will do.
1049	 */
1050	writel(0xffff0000, sdcp->base + MXS_DCP_CONTEXT);
1051	for (i = 0; i < DCP_MAX_CHANS; i++)
1052		writel(0xffffffff, sdcp->base + MXS_DCP_CH_N_STAT_CLR(i));
1053	writel(0xffffffff, sdcp->base + MXS_DCP_STAT_CLR);
1054
1055	global_sdcp = sdcp;
1056
1057	platform_set_drvdata(pdev, sdcp);
1058
1059	for (i = 0; i < DCP_MAX_CHANS; i++) {
1060		spin_lock_init(&sdcp->lock[i]);
1061		init_completion(&sdcp->completion[i]);
1062		crypto_init_queue(&sdcp->queue[i], 50);
1063	}
1064
1065	/* Create the SHA and AES handler threads. */
1066	sdcp->thread[DCP_CHAN_HASH_SHA] = kthread_run(dcp_chan_thread_sha,
1067						      NULL, "mxs_dcp_chan/sha");
1068	if (IS_ERR(sdcp->thread[DCP_CHAN_HASH_SHA])) {
1069		dev_err(dev, "Error starting SHA thread!\n");
1070		ret = PTR_ERR(sdcp->thread[DCP_CHAN_HASH_SHA]);
1071		goto err_disable_unprepare_clk;
1072	}
1073
1074	sdcp->thread[DCP_CHAN_CRYPTO] = kthread_run(dcp_chan_thread_aes,
1075						    NULL, "mxs_dcp_chan/aes");
1076	if (IS_ERR(sdcp->thread[DCP_CHAN_CRYPTO])) {
 1077		dev_err(dev, "Error starting AES thread!\n");
1078		ret = PTR_ERR(sdcp->thread[DCP_CHAN_CRYPTO]);
1079		goto err_destroy_sha_thread;
1080	}
1081
1082	/* Register the various crypto algorithms. */
1083	sdcp->caps = readl(sdcp->base + MXS_DCP_CAPABILITY1);
1084
1085	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128) {
1086		ret = crypto_register_skciphers(dcp_aes_algs,
1087						ARRAY_SIZE(dcp_aes_algs));
1088		if (ret) {
1089			/* Failed to register algorithm. */
1090			dev_err(dev, "Failed to register AES crypto!\n");
1091			goto err_destroy_aes_thread;
1092		}
1093	}
1094
1095	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1) {
1096		ret = crypto_register_ahash(&dcp_sha1_alg);
1097		if (ret) {
1098			dev_err(dev, "Failed to register %s hash!\n",
1099				dcp_sha1_alg.halg.base.cra_name);
1100			goto err_unregister_aes;
1101		}
1102	}
1103
1104	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256) {
1105		ret = crypto_register_ahash(&dcp_sha256_alg);
1106		if (ret) {
1107			dev_err(dev, "Failed to register %s hash!\n",
1108				dcp_sha256_alg.halg.base.cra_name);
1109			goto err_unregister_sha1;
1110		}
1111	}
1112
1113	return 0;
1114
1115err_unregister_sha1:
1116	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1117		crypto_unregister_ahash(&dcp_sha1_alg);
1118
1119err_unregister_aes:
1120	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1121		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1122
1123err_destroy_aes_thread:
1124	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1125
1126err_destroy_sha_thread:
1127	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1128
1129err_disable_unprepare_clk:
1130	clk_disable_unprepare(sdcp->dcp_clk);
1131
1132	return ret;
1133}
1134
1135static int mxs_dcp_remove(struct platform_device *pdev)
1136{
1137	struct dcp *sdcp = platform_get_drvdata(pdev);
1138
1139	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA256)
1140		crypto_unregister_ahash(&dcp_sha256_alg);
1141
1142	if (sdcp->caps & MXS_DCP_CAPABILITY1_SHA1)
1143		crypto_unregister_ahash(&dcp_sha1_alg);
1144
1145	if (sdcp->caps & MXS_DCP_CAPABILITY1_AES128)
1146		crypto_unregister_skciphers(dcp_aes_algs, ARRAY_SIZE(dcp_aes_algs));
1147
1148	kthread_stop(sdcp->thread[DCP_CHAN_HASH_SHA]);
1149	kthread_stop(sdcp->thread[DCP_CHAN_CRYPTO]);
1150
1151	clk_disable_unprepare(sdcp->dcp_clk);
1152
1153	platform_set_drvdata(pdev, NULL);
1154
1155	global_sdcp = NULL;
1156
1157	return 0;
1158}
1159
1160static const struct of_device_id mxs_dcp_dt_ids[] = {
1161	{ .compatible = "fsl,imx23-dcp", .data = NULL, },
1162	{ .compatible = "fsl,imx28-dcp", .data = NULL, },
1163	{ /* sentinel */ }
1164};
1165
1166MODULE_DEVICE_TABLE(of, mxs_dcp_dt_ids);
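
/*
 * Illustrative only: rough shape of a matching devicetree node.  The unit
 * address, reg range and interrupt numbers below are placeholders, not
 * authoritative values; consult the SoC dtsi and the fsl,dcp binding.
 * The driver uses the first two interrupts (VMI and DCP).
 *
 *	dcp: crypto@80028000 {
 *		compatible = "fsl,imx28-dcp";
 *		reg = <0x80028000 0x2000>;
 *		interrupts = <52 53 54>;
 *	};
 */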
1167
1168static struct platform_driver mxs_dcp_driver = {
1169	.probe	= mxs_dcp_probe,
1170	.remove	= mxs_dcp_remove,
1171	.driver	= {
1172		.name		= "mxs-dcp",
1173		.of_match_table	= mxs_dcp_dt_ids,
1174	},
1175};
1176
1177module_platform_driver(mxs_dcp_driver);
1178
1179MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
1180MODULE_DESCRIPTION("Freescale MXS DCP Driver");
1181MODULE_LICENSE("GPL");
1182MODULE_ALIAS("platform:mxs-dcp");