   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * talitos - Freescale Integrated Security Engine (SEC) device driver
   4 *
   5 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
   6 *
   7 * Scatterlist Crypto API glue code copied from files with the following:
   8 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
   9 *
  10 * Crypto algorithm registration code copied from hifn driver:
  11 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
  12 * All rights reserved.
  13 */
  14
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/mod_devicetable.h>
  18#include <linux/device.h>
  19#include <linux/interrupt.h>
  20#include <linux/crypto.h>
  21#include <linux/hw_random.h>
  22#include <linux/of.h>
  23#include <linux/of_irq.h>
  24#include <linux/platform_device.h>
  25#include <linux/dma-mapping.h>
  26#include <linux/io.h>
  27#include <linux/spinlock.h>
  28#include <linux/rtnetlink.h>
  29#include <linux/slab.h>
  30
  31#include <crypto/algapi.h>
  32#include <crypto/aes.h>
  33#include <crypto/internal/des.h>
  34#include <crypto/sha1.h>
  35#include <crypto/sha2.h>
  36#include <crypto/md5.h>
  37#include <crypto/internal/aead.h>
  38#include <crypto/authenc.h>
  39#include <crypto/internal/skcipher.h>
  40#include <crypto/hash.h>
  41#include <crypto/internal/hash.h>
  42#include <crypto/scatterwalk.h>
  43
  44#include "talitos.h"
  45
  46static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
  47			   unsigned int len, bool is_sec1)
  48{
  49	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
  50	if (is_sec1) {
  51		ptr->len1 = cpu_to_be16(len);
  52	} else {
  53		ptr->len = cpu_to_be16(len);
  54		ptr->eptr = upper_32_bits(dma_addr);
  55	}
  56}
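/*
 * Illustrative example, not part of the driver: on SEC2+ a 36-bit bus
 * address is split across the ptr/eptr pair. For
 * dma_addr = 0x923456780ULL and len = 64:
 *
 *	ptr->ptr  = cpu_to_be32(lower_32_bits(dma_addr));  // 0x23456780
 *	ptr->eptr = upper_32_bits(dma_addr);               // 0x9
 *	ptr->len  = cpu_to_be16(len);                      // 64
 *
 * SEC1 has no eptr field, so only the low 32 address bits are usable
 * and the length goes in the separate 16-bit len1 field.
 */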
  57
  58static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
  59			     struct talitos_ptr *src_ptr, bool is_sec1)
  60{
  61	dst_ptr->ptr = src_ptr->ptr;
  62	if (is_sec1) {
  63		dst_ptr->len1 = src_ptr->len1;
  64	} else {
  65		dst_ptr->len = src_ptr->len;
  66		dst_ptr->eptr = src_ptr->eptr;
  67	}
  68}
  69
  70static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
  71					   bool is_sec1)
  72{
  73	if (is_sec1)
  74		return be16_to_cpu(ptr->len1);
  75	else
  76		return be16_to_cpu(ptr->len);
  77}
  78
  79static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
  80				   bool is_sec1)
  81{
  82	if (!is_sec1)
  83		ptr->j_extent = val;
  84}
  85
  86static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
  87{
  88	if (!is_sec1)
  89		ptr->j_extent |= val;
  90}
  91
  92/*
  93 * map virtual single (contiguous) pointer to h/w descriptor pointer
  94 */
  95static void __map_single_talitos_ptr(struct device *dev,
  96				     struct talitos_ptr *ptr,
  97				     unsigned int len, void *data,
  98				     enum dma_data_direction dir,
  99				     unsigned long attrs)
 100{
 101	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
 102	struct talitos_private *priv = dev_get_drvdata(dev);
 103	bool is_sec1 = has_ftr_sec1(priv);
 104
 105	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
 106}
 107
 108static void map_single_talitos_ptr(struct device *dev,
 109				   struct talitos_ptr *ptr,
 110				   unsigned int len, void *data,
 111				   enum dma_data_direction dir)
 112{
 113	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
 114}
 115
 116static void map_single_talitos_ptr_nosync(struct device *dev,
 117					  struct talitos_ptr *ptr,
 118					  unsigned int len, void *data,
 119					  enum dma_data_direction dir)
 120{
 121	__map_single_talitos_ptr(dev, ptr, len, data, dir,
 122				 DMA_ATTR_SKIP_CPU_SYNC);
 123}
 124
 125/*
 126 * unmap bus single (contiguous) h/w descriptor pointer
 127 */
 128static void unmap_single_talitos_ptr(struct device *dev,
 129				     struct talitos_ptr *ptr,
 130				     enum dma_data_direction dir)
 131{
 132	struct talitos_private *priv = dev_get_drvdata(dev);
 133	bool is_sec1 = has_ftr_sec1(priv);
 134
 135	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
 136			 from_talitos_ptr_len(ptr, is_sec1), dir);
 137}
 138
 139static int reset_channel(struct device *dev, int ch)
 140{
 141	struct talitos_private *priv = dev_get_drvdata(dev);
 142	unsigned int timeout = TALITOS_TIMEOUT;
 143	bool is_sec1 = has_ftr_sec1(priv);
 144
 145	if (is_sec1) {
 146		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 147			  TALITOS1_CCCR_LO_RESET);
 148
 149		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
 150			TALITOS1_CCCR_LO_RESET) && --timeout)
 151			cpu_relax();
 152	} else {
 153		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
 154			  TALITOS2_CCCR_RESET);
 155
 156		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
 157			TALITOS2_CCCR_RESET) && --timeout)
 158			cpu_relax();
 159	}
 160
 161	if (timeout == 0) {
 162		dev_err(dev, "failed to reset channel %d\n", ch);
 163		return -EIO;
 164	}
 165
 166	/* set 36-bit addressing, done writeback enable and done IRQ enable */
 167	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
 168		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
 169	/* enable chaining descriptors */
 170	if (is_sec1)
 171		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 172			  TALITOS_CCCR_LO_NE);
 173
 174	/* and ICCR writeback, if available */
 175	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 176		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 177		          TALITOS_CCCR_LO_IWSE);
 178
 179	return 0;
 180}
 181
 182static int reset_device(struct device *dev)
 183{
 184	struct talitos_private *priv = dev_get_drvdata(dev);
 185	unsigned int timeout = TALITOS_TIMEOUT;
 186	bool is_sec1 = has_ftr_sec1(priv);
 187	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
 188
 189	setbits32(priv->reg + TALITOS_MCR, mcr);
 190
 191	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
 192	       && --timeout)
 193		cpu_relax();
 194
 195	if (priv->irq[1]) {
 196		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
 197		setbits32(priv->reg + TALITOS_MCR, mcr);
 198	}
 199
 200	if (timeout == 0) {
 201		dev_err(dev, "failed to reset device\n");
 202		return -EIO;
 203	}
 204
 205	return 0;
 206}
 207
 208/*
 209 * Reset and initialize the device
 210 */
 211static int init_device(struct device *dev)
 212{
 213	struct talitos_private *priv = dev_get_drvdata(dev);
 214	int ch, err;
 215	bool is_sec1 = has_ftr_sec1(priv);
 216
 217	/*
 218	 * Master reset
 219	 * errata documentation: warning: certain SEC interrupts
 220	 * are not fully cleared by writing the MCR:SWR bit,
 221	 * set bit twice to completely reset
 222	 */
 223	err = reset_device(dev);
 224	if (err)
 225		return err;
 226
 227	err = reset_device(dev);
 228	if (err)
 229		return err;
 230
 231	/* reset channels */
 232	for (ch = 0; ch < priv->num_channels; ch++) {
 233		err = reset_channel(dev, ch);
 234		if (err)
 235			return err;
 236	}
 237
 238	/* enable channel done and error interrupts */
 239	if (is_sec1) {
 240		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
 241		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
 242		/* disable parity error check in DEU (erroneous? test vect.) */
 243		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
 244	} else {
 245		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
 246		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
 247	}
 248
 249	/* disable integrity check error interrupts (use writeback instead) */
 250	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 251		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
 252		          TALITOS_MDEUICR_LO_ICE);
 253
 254	return 0;
 255}
 256
 257/**
 258 * talitos_submit - submits a descriptor to the device for processing
 259 * @dev:	the SEC device to be used
 260 * @ch:		the SEC device channel to be used
 261 * @desc:	the descriptor to be processed by the device
 262 * @callback:	whom to call when processing is complete
 263 * @context:	a handle for use by caller (optional)
 264 *
 265 * desc must contain valid dma-mapped (bus physical) address pointers.
 266 * callback must check err and feedback in descriptor header
 267 * for device processing status.
 268 */
 269static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
 270			  void (*callback)(struct device *dev,
 271					   struct talitos_desc *desc,
 272					   void *context, int error),
 273			  void *context)
 274{
 275	struct talitos_private *priv = dev_get_drvdata(dev);
 276	struct talitos_request *request;
 277	unsigned long flags;
 278	int head;
 279	bool is_sec1 = has_ftr_sec1(priv);
 280
 281	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
 282
 283	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
 284		/* h/w fifo is full */
 285		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 286		return -EAGAIN;
 287	}
 288
 289	head = priv->chan[ch].head;
 290	request = &priv->chan[ch].fifo[head];
 291
 292	/* map descriptor and save caller data */
 293	if (is_sec1) {
 294		desc->hdr1 = desc->hdr;
 295		request->dma_desc = dma_map_single(dev, &desc->hdr1,
 296						   TALITOS_DESC_SIZE,
 297						   DMA_BIDIRECTIONAL);
 298	} else {
 299		request->dma_desc = dma_map_single(dev, desc,
 300						   TALITOS_DESC_SIZE,
 301						   DMA_BIDIRECTIONAL);
 302	}
 303	request->callback = callback;
 304	request->context = context;
 305
 306	/* increment fifo head */
 307	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
 308
 309	smp_wmb();
 310	request->desc = desc;
 311
 312	/* GO! */
 313	wmb();
 314	out_be32(priv->chan[ch].reg + TALITOS_FF,
 315		 upper_32_bits(request->dma_desc));
 316	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
 317		 lower_32_bits(request->dma_desc));
 318
 319	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 320
 321	return -EINPROGRESS;
 322}
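/*
 * Minimal caller sketch, illustrative only (my_done() and my_req are
 * hypothetical names). desc must already hold dma-mapped pointers:
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		// check error and the done/feedback bits in desc->hdr
 *	}
 *
 *	int ret = talitos_submit(dev, ch, desc, my_done, my_req);
 *	if (ret == -EAGAIN)
 *		; // channel fifo full: back off and resubmit later
 *	// otherwise ret is -EINPROGRESS and my_done() runs on completion
 */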
 323
 324static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
 325{
 326	struct talitos_edesc *edesc;
 327
 328	if (!is_sec1)
 329		return request->desc->hdr;
 330
 331	if (!request->desc->next_desc)
 332		return request->desc->hdr1;
 333
 334	edesc = container_of(request->desc, struct talitos_edesc, desc);
 335
 336	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
 337}
 338
 339/*
 340 * process what was done, notify callback of error if not
 341 */
 342static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 343{
 344	struct talitos_private *priv = dev_get_drvdata(dev);
 345	struct talitos_request *request, saved_req;
 346	unsigned long flags;
 347	int tail, status;
 348	bool is_sec1 = has_ftr_sec1(priv);
 349
 350	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 351
 352	tail = priv->chan[ch].tail;
 353	while (priv->chan[ch].fifo[tail].desc) {
 354		__be32 hdr;
 355
 356		request = &priv->chan[ch].fifo[tail];
 357
 358		/* descriptors with their done bits set don't get the error */
 359		rmb();
 360		hdr = get_request_hdr(request, is_sec1);
 361
 362		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
 363			status = 0;
 364		else
 365			if (!error)
 366				break;
 367			else
 368				status = error;
 369
 370		dma_unmap_single(dev, request->dma_desc,
 371				 TALITOS_DESC_SIZE,
 372				 DMA_BIDIRECTIONAL);
 373
 374		/* copy entries so we can call callback outside lock */
 375		saved_req.desc = request->desc;
 376		saved_req.callback = request->callback;
 377		saved_req.context = request->context;
 378
 379		/* release request entry in fifo */
 380		smp_wmb();
 381		request->desc = NULL;
 382
 383		/* increment fifo tail */
 384		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
 385
 386		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 387
 388		atomic_dec(&priv->chan[ch].submit_count);
 389
 390		saved_req.callback(dev, saved_req.desc, saved_req.context,
 391				   status);
 392		/* channel may resume processing in single desc error case */
 393		if (error && !reset_ch && status == error)
 394			return;
 395		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 396		tail = priv->chan[ch].tail;
 397	}
 398
 399	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 400}
 401
 402/*
 403 * process completed requests for channels that have done status
 404 */
 405#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
 406static void talitos1_done_##name(unsigned long data)			\
 407{									\
 408	struct device *dev = (struct device *)data;			\
 409	struct talitos_private *priv = dev_get_drvdata(dev);		\
 410	unsigned long flags;						\
 411									\
 412	if (ch_done_mask & 0x10000000)					\
 413		flush_channel(dev, 0, 0, 0);				\
 414	if (ch_done_mask & 0x40000000)					\
 415		flush_channel(dev, 1, 0, 0);				\
 416	if (ch_done_mask & 0x00010000)					\
 417		flush_channel(dev, 2, 0, 0);				\
 418	if (ch_done_mask & 0x00040000)					\
 419		flush_channel(dev, 3, 0, 0);				\
 420									\
 421	/* At this point, all completed channels have been processed */	\
 422	/* Unmask done interrupts for channels completed later on. */	\
 423	spin_lock_irqsave(&priv->reg_lock, flags);			\
 424	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
 425	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
 426	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
 427}
 428
 429DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
 430DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
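/*
 * Illustrative expansion, not compiled: DEF_TALITOS1_DONE(ch0,
 * TALITOS1_ISR_CH_0_DONE) above generates a tasklet body roughly like:
 *
 *	static void talitos1_done_ch0(unsigned long data)
 *	{
 *		...
 *		if (TALITOS1_ISR_CH_0_DONE & 0x10000000)
 *			flush_channel(dev, 0, 0, 0);
 *		...
 *		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_ISR_CH_0_DONE);
 *		...
 *	}
 *
 * Note the inverted mask polarity: SEC1 unmasks done interrupts with
 * clrbits32(), while the SEC2 variant below uses setbits32().
 */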
 431
 432#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
 433static void talitos2_done_##name(unsigned long data)			\
 434{									\
 435	struct device *dev = (struct device *)data;			\
 436	struct talitos_private *priv = dev_get_drvdata(dev);		\
 437	unsigned long flags;						\
 438									\
 439	if (ch_done_mask & 1)						\
 440		flush_channel(dev, 0, 0, 0);				\
 441	if (ch_done_mask & (1 << 2))					\
 442		flush_channel(dev, 1, 0, 0);				\
 443	if (ch_done_mask & (1 << 4))					\
 444		flush_channel(dev, 2, 0, 0);				\
 445	if (ch_done_mask & (1 << 6))					\
 446		flush_channel(dev, 3, 0, 0);				\
 447									\
 448	/* At this point, all completed channels have been processed */	\
 449	/* Unmask done interrupts for channels completed later on. */	\
 450	spin_lock_irqsave(&priv->reg_lock, flags);			\
 451	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
 452	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
 453	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
 454}
 455
 456DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
 457DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
 458DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
 459DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
 460
 461/*
 462 * locate current (offending) descriptor
 463 */
 464static __be32 current_desc_hdr(struct device *dev, int ch)
 465{
 466	struct talitos_private *priv = dev_get_drvdata(dev);
 467	int tail, iter;
 468	dma_addr_t cur_desc;
 469
 470	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
 471	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
 472
 473	if (!cur_desc) {
 474		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
 475		return 0;
 476	}
 477
 478	tail = priv->chan[ch].tail;
 479
 480	iter = tail;
 481	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
 482	       priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
 483		iter = (iter + 1) & (priv->fifo_len - 1);
 484		if (iter == tail) {
 485			dev_err(dev, "couldn't locate current descriptor\n");
 486			return 0;
 487		}
 488	}
 489
 490	if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
 491		struct talitos_edesc *edesc;
 492
 493		edesc = container_of(priv->chan[ch].fifo[iter].desc,
 494				     struct talitos_edesc, desc);
 495		return ((struct talitos_desc *)
 496			(edesc->buf + edesc->dma_len))->hdr;
 497	}
 498
 499	return priv->chan[ch].fifo[iter].desc->hdr;
 500}
 501
 502/*
 503 * user diagnostics; report root cause of error based on execution unit status
 504 */
 505static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
 506{
 507	struct talitos_private *priv = dev_get_drvdata(dev);
 508	int i;
 509
 510	if (!desc_hdr)
 511		desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));
 512
 513	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
 514	case DESC_HDR_SEL0_AFEU:
 515		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
 516			in_be32(priv->reg_afeu + TALITOS_EUISR),
 517			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
 518		break;
 519	case DESC_HDR_SEL0_DEU:
 520		dev_err(dev, "DEUISR 0x%08x_%08x\n",
 521			in_be32(priv->reg_deu + TALITOS_EUISR),
 522			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
 523		break;
 524	case DESC_HDR_SEL0_MDEUA:
 525	case DESC_HDR_SEL0_MDEUB:
 526		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 527			in_be32(priv->reg_mdeu + TALITOS_EUISR),
 528			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
 529		break;
 530	case DESC_HDR_SEL0_RNG:
 531		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
 532			in_be32(priv->reg_rngu + TALITOS_ISR),
 533			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
 534		break;
 535	case DESC_HDR_SEL0_PKEU:
 536		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
 537			in_be32(priv->reg_pkeu + TALITOS_EUISR),
 538			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
 539		break;
 540	case DESC_HDR_SEL0_AESU:
 541		dev_err(dev, "AESUISR 0x%08x_%08x\n",
 542			in_be32(priv->reg_aesu + TALITOS_EUISR),
 543			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
 544		break;
 545	case DESC_HDR_SEL0_CRCU:
 546		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 547			in_be32(priv->reg_crcu + TALITOS_EUISR),
 548			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
 549		break;
 550	case DESC_HDR_SEL0_KEU:
 551		dev_err(dev, "KEUISR 0x%08x_%08x\n",
 552			in_be32(priv->reg_pkeu + TALITOS_EUISR),
 553			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
 554		break;
 555	}
 556
 557	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
 558	case DESC_HDR_SEL1_MDEUA:
 559	case DESC_HDR_SEL1_MDEUB:
 560		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 561			in_be32(priv->reg_mdeu + TALITOS_EUISR),
 562			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
 563		break;
 564	case DESC_HDR_SEL1_CRCU:
 565		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 566			in_be32(priv->reg_crcu + TALITOS_EUISR),
 567			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
 568		break;
 569	}
 570
 571	for (i = 0; i < 8; i++)
 572		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
 573			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
 574			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
 575}
 576
 577/*
 578 * recover from error interrupts
 579 */
 580static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
 581{
 582	struct talitos_private *priv = dev_get_drvdata(dev);
 583	unsigned int timeout = TALITOS_TIMEOUT;
 584	int ch, error, reset_dev = 0;
 585	u32 v_lo;
 586	bool is_sec1 = has_ftr_sec1(priv);
 587	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
 588
 589	for (ch = 0; ch < priv->num_channels; ch++) {
 590		/* skip channels without errors */
 591		if (is_sec1) {
 592			/* bits 29, 31, 17, 19 */
 593			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
 594				continue;
 595		} else {
 596			if (!(isr & (1 << (ch * 2 + 1))))
 597				continue;
 598		}
 599
 600		error = -EINVAL;
 601
 602		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
 603
 604		if (v_lo & TALITOS_CCPSR_LO_DOF) {
 605			dev_err(dev, "double fetch fifo overflow error\n");
 606			error = -EAGAIN;
 607			reset_ch = 1;
 608		}
 609		if (v_lo & TALITOS_CCPSR_LO_SOF) {
 610			/* h/w dropped descriptor */
 611			dev_err(dev, "single fetch fifo overflow error\n");
 612			error = -EAGAIN;
 613		}
 614		if (v_lo & TALITOS_CCPSR_LO_MDTE)
 615			dev_err(dev, "master data transfer error\n");
 616		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
 617			dev_err(dev, is_sec1 ? "pointer not complete error\n"
 618					     : "s/g data length zero error\n");
 619		if (v_lo & TALITOS_CCPSR_LO_FPZ)
 620			dev_err(dev, is_sec1 ? "parity error\n"
 621					     : "fetch pointer zero error\n");
 622		if (v_lo & TALITOS_CCPSR_LO_IDH)
 623			dev_err(dev, "illegal descriptor header error\n");
 624		if (v_lo & TALITOS_CCPSR_LO_IEU)
 625			dev_err(dev, is_sec1 ? "static assignment error\n"
 626					     : "invalid exec unit error\n");
 627		if (v_lo & TALITOS_CCPSR_LO_EU)
 628			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
 629		if (!is_sec1) {
 630			if (v_lo & TALITOS_CCPSR_LO_GB)
 631				dev_err(dev, "gather boundary error\n");
 632			if (v_lo & TALITOS_CCPSR_LO_GRL)
 633				dev_err(dev, "gather return/length error\n");
 634			if (v_lo & TALITOS_CCPSR_LO_SB)
 635				dev_err(dev, "scatter boundary error\n");
 636			if (v_lo & TALITOS_CCPSR_LO_SRL)
 637				dev_err(dev, "scatter return/length error\n");
 638		}
 639
 640		flush_channel(dev, ch, error, reset_ch);
 641
 642		if (reset_ch) {
 643			reset_channel(dev, ch);
 644		} else {
 645			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
 646				  TALITOS2_CCCR_CONT);
 647			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
 648			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
 649			       TALITOS2_CCCR_CONT) && --timeout)
 650				cpu_relax();
 651			if (timeout == 0) {
 652				dev_err(dev, "failed to restart channel %d\n",
 653					ch);
 654				reset_dev = 1;
 655			}
 656		}
 657	}
 658	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
 659	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
 660		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
 661			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
 662				isr, isr_lo);
 663		else
 664			dev_err(dev, "done overflow, internal time out, or "
 665				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
 666
 667		/* purge request queues */
 668		for (ch = 0; ch < priv->num_channels; ch++)
 669			flush_channel(dev, ch, -EIO, 1);
 670
 671		/* reset and reinitialize the device */
 672		init_device(dev);
 673	}
 674}
 675
 676#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
 677static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
 678{									       \
 679	struct device *dev = data;					       \
 680	struct talitos_private *priv = dev_get_drvdata(dev);		       \
 681	u32 isr, isr_lo;						       \
 682	unsigned long flags;						       \
 683									       \
 684	spin_lock_irqsave(&priv->reg_lock, flags);			       \
 685	isr = in_be32(priv->reg + TALITOS_ISR);				       \
 686	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
 687	/* Acknowledge interrupt */					       \
 688	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
 689	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
 690									       \
 691	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
 692		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 693		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
 694	}								       \
 695	else {								       \
 696		if (likely(isr & ch_done_mask)) {			       \
 697			/* mask further done interrupts. */		       \
 698			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
 699			/* done_task will unmask done interrupts at exit */    \
 700			tasklet_schedule(&priv->done_task[tlet]);	       \
 701		}							       \
 702		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 703	}								       \
 704									       \
 705	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
 706								IRQ_NONE;      \
 707}
 708
 709DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
 710
 711#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
 712static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
 713{									       \
 714	struct device *dev = data;					       \
 715	struct talitos_private *priv = dev_get_drvdata(dev);		       \
 716	u32 isr, isr_lo;						       \
 717	unsigned long flags;						       \
 718									       \
 719	spin_lock_irqsave(&priv->reg_lock, flags);			       \
 720	isr = in_be32(priv->reg + TALITOS_ISR);				       \
 721	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
 722	/* Acknowledge interrupt */					       \
 723	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
 724	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
 725									       \
 726	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
 727		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 728		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
 729	}								       \
 730	else {								       \
 731		if (likely(isr & ch_done_mask)) {			       \
 732			/* mask further done interrupts. */		       \
 733			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
 734			/* done_task will unmask done interrupts at exit */    \
 735			tasklet_schedule(&priv->done_task[tlet]);	       \
 736		}							       \
 737		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 738	}								       \
 739									       \
 740	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
 741								IRQ_NONE;      \
 742}
 743
 744DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
 745DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
 746		       0)
 747DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
 748		       1)
 749
 750/*
 751 * hwrng
 752 */
 753static int talitos_rng_data_present(struct hwrng *rng, int wait)
 754{
 755	struct device *dev = (struct device *)rng->priv;
 756	struct talitos_private *priv = dev_get_drvdata(dev);
 757	u32 ofl;
 758	int i;
 759
 760	for (i = 0; i < 20; i++) {
 761		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
 762		      TALITOS_RNGUSR_LO_OFL;
 763		if (ofl || !wait)
 764			break;
 765		udelay(10);
 766	}
 767
 768	return !!ofl;
 769}
 770
 771static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
 772{
 773	struct device *dev = (struct device *)rng->priv;
 774	struct talitos_private *priv = dev_get_drvdata(dev);
 775
 776	/* rng fifo requires 64-bit accesses */
 777	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
 778	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
 779
 780	return sizeof(u32);
 781}
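/*
 * Illustrative note: the fifo only supports full 64-bit reads, so both
 * word halves are read back to back; the second (low) word overwrites
 * the first, and each call returns sizeof(u32) bytes to the hwrng core.
 */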
 782
 783static int talitos_rng_init(struct hwrng *rng)
 784{
 785	struct device *dev = (struct device *)rng->priv;
 786	struct talitos_private *priv = dev_get_drvdata(dev);
 787	unsigned int timeout = TALITOS_TIMEOUT;
 788
 789	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
 790	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
 791		 & TALITOS_RNGUSR_LO_RD)
 792	       && --timeout)
 793		cpu_relax();
 794	if (timeout == 0) {
 795		dev_err(dev, "failed to reset rng hw\n");
 796		return -ENODEV;
 797	}
 798
 799	/* start generating */
 800	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
 801
 802	return 0;
 803}
 804
 805static int talitos_register_rng(struct device *dev)
 806{
 807	struct talitos_private *priv = dev_get_drvdata(dev);
 808	int err;
 809
 810	priv->rng.name		= dev_driver_string(dev);
 811	priv->rng.init		= talitos_rng_init;
 812	priv->rng.data_present	= talitos_rng_data_present;
 813	priv->rng.data_read	= talitos_rng_data_read;
 814	priv->rng.priv		= (unsigned long)dev;
 815
 816	err = hwrng_register(&priv->rng);
 817	if (!err)
 818		priv->rng_registered = true;
 819
 820	return err;
 821}
 822
 823static void talitos_unregister_rng(struct device *dev)
 824{
 825	struct talitos_private *priv = dev_get_drvdata(dev);
 826
 827	if (!priv->rng_registered)
 828		return;
 829
 830	hwrng_unregister(&priv->rng);
 831	priv->rng_registered = false;
 832}
 833
 834/*
 835 * crypto alg
 836 */
 837#define TALITOS_CRA_PRIORITY		3000
 838/*
 839 * Defines a priority for doing AEAD with descriptors type
 840 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 841 */
 842#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
 843#ifdef CONFIG_CRYPTO_DEV_TALITOS2
 844#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
 845#else
 846#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
 847#endif
 848#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 849
 850struct talitos_ctx {
 851	struct device *dev;
 852	int ch;
 853	__be32 desc_hdr_template;
 854	u8 key[TALITOS_MAX_KEY_SIZE];
 855	u8 iv[TALITOS_MAX_IV_LENGTH];
 856	dma_addr_t dma_key;
 857	unsigned int keylen;
 858	unsigned int enckeylen;
 859	unsigned int authkeylen;
 860};
 861
 862#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
 863#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
 864
 865struct talitos_ahash_req_ctx {
 866	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 867	unsigned int hw_context_size;
 868	u8 buf[2][HASH_MAX_BLOCK_SIZE];
 869	int buf_idx;
 870	unsigned int swinit;
 871	unsigned int first;
 872	unsigned int last;
 873	unsigned int to_hash_later;
 874	unsigned int nbuf;
 875	struct scatterlist bufsl[2];
 876	struct scatterlist *psrc;
 877};
 878
 879struct talitos_export_state {
 880	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 881	u8 buf[HASH_MAX_BLOCK_SIZE];
 882	unsigned int swinit;
 883	unsigned int first;
 884	unsigned int last;
 885	unsigned int to_hash_later;
 886	unsigned int nbuf;
 887};
 888
 889static int aead_setkey(struct crypto_aead *authenc,
 890		       const u8 *key, unsigned int keylen)
 891{
 892	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 893	struct device *dev = ctx->dev;
 894	struct crypto_authenc_keys keys;
 895
 896	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 897		goto badkey;
 898
 899	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
 900		goto badkey;
 901
 902	if (ctx->keylen)
 903		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
 904
 905	memcpy(ctx->key, keys.authkey, keys.authkeylen);
 906	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
 907
 908	ctx->keylen = keys.authkeylen + keys.enckeylen;
 909	ctx->enckeylen = keys.enckeylen;
 910	ctx->authkeylen = keys.authkeylen;
 911	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
 912				      DMA_TO_DEVICE);
 913
 914	memzero_explicit(&keys, sizeof(keys));
 915	return 0;
 916
 917badkey:
 918	memzero_explicit(&keys, sizeof(keys));
 919	return -EINVAL;
 920}
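/*
 * Illustrative key layout, not part of the driver: after aead_setkey()
 * the two keys sit back to back in a single DMA mapping:
 *
 *	ctx->key:  [ authkey (authkeylen) | enckey (enckeylen) ]
 *	            ^ ctx->dma_key          ^ ctx->dma_key + authkeylen
 *
 * ipsec_esp() later points the descriptor's HMAC-key pointer at the
 * start of this mapping and the cipher-key pointer at the
 * ctx->authkeylen offset.
 */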
 921
 922static int aead_des3_setkey(struct crypto_aead *authenc,
 923			    const u8 *key, unsigned int keylen)
 924{
 925	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 926	struct device *dev = ctx->dev;
 927	struct crypto_authenc_keys keys;
 928	int err;
 929
 930	err = crypto_authenc_extractkeys(&keys, key, keylen);
 931	if (unlikely(err))
 932		goto out;
 933
 934	err = -EINVAL;
 935	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
 936		goto out;
 937
 938	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
 939	if (err)
 940		goto out;
 941
 942	if (ctx->keylen)
 943		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
 944
 945	memcpy(ctx->key, keys.authkey, keys.authkeylen);
 946	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
 947
 948	ctx->keylen = keys.authkeylen + keys.enckeylen;
 949	ctx->enckeylen = keys.enckeylen;
 950	ctx->authkeylen = keys.authkeylen;
 951	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
 952				      DMA_TO_DEVICE);
 953
 954out:
 955	memzero_explicit(&keys, sizeof(keys));
 956	return err;
 957}
 958
 959static void talitos_sg_unmap(struct device *dev,
 960			     struct talitos_edesc *edesc,
 961			     struct scatterlist *src,
 962			     struct scatterlist *dst,
 963			     unsigned int len, unsigned int offset)
 964{
 965	struct talitos_private *priv = dev_get_drvdata(dev);
 966	bool is_sec1 = has_ftr_sec1(priv);
 967	unsigned int src_nents = edesc->src_nents ? : 1;
 968	unsigned int dst_nents = edesc->dst_nents ? : 1;
 969
 970	if (is_sec1 && dst && dst_nents > 1) {
 971		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
 972					   len, DMA_FROM_DEVICE);
 973		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
 974				     offset);
 975	}
 976	if (src != dst) {
 977		if (src_nents == 1 || !is_sec1)
 978			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 979
 980		if (dst && (dst_nents == 1 || !is_sec1))
 981			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 982	} else if (src_nents == 1 || !is_sec1) {
 983		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 984	}
 985}
 986
 987static void ipsec_esp_unmap(struct device *dev,
 988			    struct talitos_edesc *edesc,
 989			    struct aead_request *areq, bool encrypt)
 990{
 991	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
 992	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
 993	unsigned int ivsize = crypto_aead_ivsize(aead);
 994	unsigned int authsize = crypto_aead_authsize(aead);
 995	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
 996	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
 997	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
 998
 999	if (is_ipsec_esp)
1000		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1001					 DMA_FROM_DEVICE);
1002	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1003
1004	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1005			 cryptlen + authsize, areq->assoclen);
1006
1007	if (edesc->dma_len)
1008		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1009				 DMA_BIDIRECTIONAL);
1010
1011	if (!is_ipsec_esp) {
1012		unsigned int dst_nents = edesc->dst_nents ? : 1;
1013
1014		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1015				   areq->assoclen + cryptlen - ivsize);
1016	}
1017}
1018
1019/*
1020 * ipsec_esp descriptor callbacks
1021 */
1022static void ipsec_esp_encrypt_done(struct device *dev,
1023				   struct talitos_desc *desc, void *context,
1024				   int err)
1025{
1026	struct aead_request *areq = context;
1027	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1028	unsigned int ivsize = crypto_aead_ivsize(authenc);
1029	struct talitos_edesc *edesc;
1030
1031	edesc = container_of(desc, struct talitos_edesc, desc);
1032
1033	ipsec_esp_unmap(dev, edesc, areq, true);
1034
1035	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1036
1037	kfree(edesc);
1038
1039	aead_request_complete(areq, err);
1040}
1041
1042static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1043					  struct talitos_desc *desc,
1044					  void *context, int err)
1045{
1046	struct aead_request *req = context;
1047	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1048	unsigned int authsize = crypto_aead_authsize(authenc);
1049	struct talitos_edesc *edesc;
1050	char *oicv, *icv;
1051
1052	edesc = container_of(desc, struct talitos_edesc, desc);
1053
1054	ipsec_esp_unmap(dev, edesc, req, false);
1055
1056	if (!err) {
1057		/* auth check */
1058		oicv = edesc->buf + edesc->dma_len;
1059		icv = oicv - authsize;
1060
1061		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1062	}
1063
1064	kfree(edesc);
1065
1066	aead_request_complete(req, err);
1067}
1068
1069static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1070					  struct talitos_desc *desc,
1071					  void *context, int err)
1072{
1073	struct aead_request *req = context;
1074	struct talitos_edesc *edesc;
1075
1076	edesc = container_of(desc, struct talitos_edesc, desc);
1077
1078	ipsec_esp_unmap(dev, edesc, req, false);
1079
1080	/* check ICV auth status */
1081	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1082		     DESC_HDR_LO_ICCR1_PASS))
1083		err = -EBADMSG;
1084
1085	kfree(edesc);
1086
1087	aead_request_complete(req, err);
1088}
1089
1090/*
1091 * convert scatterlist to SEC h/w link table format
1092 * stop at cryptlen bytes
1093 */
1094static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1095				 unsigned int offset, int datalen, int elen,
1096				 struct talitos_ptr *link_tbl_ptr, int align)
1097{
1098	int n_sg = elen ? sg_count + 1 : sg_count;
1099	int count = 0;
1100	int cryptlen = datalen + elen;
1101	int padding = ALIGN(cryptlen, align) - cryptlen;
1102
1103	while (cryptlen && sg && n_sg--) {
1104		unsigned int len = sg_dma_len(sg);
1105
1106		if (offset >= len) {
1107			offset -= len;
1108			goto next;
1109		}
1110
1111		len -= offset;
1112
1113		if (len > cryptlen)
1114			len = cryptlen;
1115
1116		if (datalen > 0 && len > datalen) {
1117			to_talitos_ptr(link_tbl_ptr + count,
1118				       sg_dma_address(sg) + offset, datalen, 0);
1119			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1120			count++;
1121			len -= datalen;
1122			offset += datalen;
1123		}
1124		to_talitos_ptr(link_tbl_ptr + count,
1125			       sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
1126		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1127		count++;
1128		cryptlen -= len;
1129		datalen -= len;
1130		offset = 0;
1131
1132next:
1133		sg = sg_next(sg);
1134	}
1135
1136	/* tag end of link table */
1137	if (count > 0)
1138		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1139				       DESC_PTR_LNKTBL_RET, 0);
1140
1141	return count;
1142}
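/*
 * Illustrative example, not part of the driver: a two-segment source of
 * 32 + 16 bytes with datalen = 48, elen = 0 and align = 1 yields two
 * link table entries, the last one tagged as end of table:
 *
 *	entry[0]: addr = sg_dma_address(sg0), len = 32
 *	entry[1]: addr = sg_dma_address(sg1), len = 16,
 *	          j_extent = DESC_PTR_LNKTBL_RET
 */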
1143
1144static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1145			      unsigned int len, struct talitos_edesc *edesc,
1146			      struct talitos_ptr *ptr, int sg_count,
1147			      unsigned int offset, int tbl_off, int elen,
1148			      bool force, int align)
1149{
1150	struct talitos_private *priv = dev_get_drvdata(dev);
1151	bool is_sec1 = has_ftr_sec1(priv);
1152	int aligned_len = ALIGN(len, align);
1153
1154	if (!src) {
1155		to_talitos_ptr(ptr, 0, 0, is_sec1);
1156		return 1;
1157	}
1158	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1159	if (sg_count == 1 && !force) {
1160		to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
1161		return sg_count;
1162	}
1163	if (is_sec1) {
1164		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
1165		return sg_count;
1166	}
1167	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1168					 &edesc->link_tbl[tbl_off], align);
1169	if (sg_count == 1 && !force) {
1170		/* Only one segment now, so no link tbl needed */
1171		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1172		return sg_count;
1173	}
1174	to_talitos_ptr(ptr, edesc->dma_link_tbl +
1175			    tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
1176	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1177
1178	return sg_count;
1179}
1180
1181static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1182			  unsigned int len, struct talitos_edesc *edesc,
1183			  struct talitos_ptr *ptr, int sg_count,
1184			  unsigned int offset, int tbl_off)
1185{
1186	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1187				  tbl_off, 0, false, 1);
1188}
1189
1190/*
1191 * fill in and submit ipsec_esp descriptor
1192 */
1193static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1194		     bool encrypt,
1195		     void (*callback)(struct device *dev,
1196				      struct talitos_desc *desc,
1197				      void *context, int error))
1198{
1199	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1200	unsigned int authsize = crypto_aead_authsize(aead);
1201	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1202	struct device *dev = ctx->dev;
1203	struct talitos_desc *desc = &edesc->desc;
1204	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1205	unsigned int ivsize = crypto_aead_ivsize(aead);
1206	int tbl_off = 0;
1207	int sg_count, ret;
1208	int elen = 0;
1209	bool sync_needed = false;
1210	struct talitos_private *priv = dev_get_drvdata(dev);
1211	bool is_sec1 = has_ftr_sec1(priv);
1212	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1213	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1214	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1215	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
1216
1217	/* hmac key */
1218	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1219
1220	sg_count = edesc->src_nents ?: 1;
1221	if (is_sec1 && sg_count > 1)
1222		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1223				  areq->assoclen + cryptlen);
1224	else
1225		sg_count = dma_map_sg(dev, areq->src, sg_count,
1226				      (areq->src == areq->dst) ?
1227				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1228
1229	/* hmac data */
1230	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1231			     &desc->ptr[1], sg_count, 0, tbl_off);
1232
1233	if (ret > 1) {
1234		tbl_off += ret;
1235		sync_needed = true;
1236	}
1237
1238	/* cipher iv */
1239	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1240
1241	/* cipher key */
1242	to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1243		       ctx->enckeylen, is_sec1);
1244
1245	/*
1246	 * cipher in
1247	 * map and adjust cipher len to aead request cryptlen.
1248	 * extent is bytes of HMAC postpended to ciphertext,
1249	 * typically 12 for ipsec
1250	 */
1251	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1252		elen = authsize;
1253
1254	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1255				 sg_count, areq->assoclen, tbl_off, elen,
1256				 false, 1);
1257
1258	if (ret > 1) {
1259		tbl_off += ret;
1260		sync_needed = true;
1261	}
1262
1263	/* cipher out */
1264	if (areq->src != areq->dst) {
1265		sg_count = edesc->dst_nents ? : 1;
1266		if (!is_sec1 || sg_count == 1)
1267			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1268	}
1269
1270	if (is_ipsec_esp && encrypt)
1271		elen = authsize;
1272	else
1273		elen = 0;
1274	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1275				 sg_count, areq->assoclen, tbl_off, elen,
1276				 is_ipsec_esp && !encrypt, 1);
1277	tbl_off += ret;
1278
1279	if (!encrypt && is_ipsec_esp) {
1280		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1281
1282		/* Add an entry to the link table for ICV data */
1283		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1284		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1285
1286		/* icv data follows link tables */
1287		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1288		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1289		sync_needed = true;
1290	} else if (!encrypt) {
1291		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1292		sync_needed = true;
1293	} else if (!is_ipsec_esp) {
1294		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1295			       sg_count, areq->assoclen + cryptlen, tbl_off);
1296	}
1297
1298	/* iv out */
1299	if (is_ipsec_esp)
1300		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1301				       DMA_FROM_DEVICE);
1302
1303	if (sync_needed)
1304		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1305					   edesc->dma_len,
1306					   DMA_BIDIRECTIONAL);
1307
1308	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1309	if (ret != -EINPROGRESS) {
1310		ipsec_esp_unmap(dev, edesc, areq, encrypt);
1311		kfree(edesc);
1312	}
1313	return ret;
1314}
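/*
 * Illustrative pointer map built by ipsec_esp() above for the IPSEC_ESP
 * descriptor type (the HSNA type swaps ptr[2]/ptr[3] and uses ptr[6]
 * for the ICV rather than the IV):
 *
 *	ptr[0] HMAC key         ptr[4] cipher in (+ ICV extent on decrypt)
 *	ptr[1] HMAC data (AAD)  ptr[5] cipher out
 *	ptr[2] cipher IV        ptr[6] IV out
 *	ptr[3] cipher key
 */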
1315
1316/*
1317 * allocate and map the extended descriptor
1318 */
1319static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1320						 struct scatterlist *src,
1321						 struct scatterlist *dst,
1322						 u8 *iv,
1323						 unsigned int assoclen,
1324						 unsigned int cryptlen,
1325						 unsigned int authsize,
1326						 unsigned int ivsize,
1327						 int icv_stashing,
1328						 u32 cryptoflags,
1329						 bool encrypt)
1330{
1331	struct talitos_edesc *edesc;
1332	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1333	dma_addr_t iv_dma = 0;
1334	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1335		      GFP_ATOMIC;
1336	struct talitos_private *priv = dev_get_drvdata(dev);
1337	bool is_sec1 = has_ftr_sec1(priv);
1338	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1339
1340	if (cryptlen + authsize > max_len) {
1341		dev_err(dev, "length exceeds h/w max limit\n");
1342		return ERR_PTR(-EINVAL);
1343	}
1344
1345	if (!dst || dst == src) {
1346		src_len = assoclen + cryptlen + authsize;
1347		src_nents = sg_nents_for_len(src, src_len);
1348		if (src_nents < 0) {
1349			dev_err(dev, "Invalid number of src SG.\n");
1350			return ERR_PTR(-EINVAL);
1351		}
1352		src_nents = (src_nents == 1) ? 0 : src_nents;
1353		dst_nents = dst ? src_nents : 0;
1354		dst_len = 0;
1355	} else { /* dst && dst != src*/
1356		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1357		src_nents = sg_nents_for_len(src, src_len);
1358		if (src_nents < 0) {
1359			dev_err(dev, "Invalid number of src SG.\n");
1360			return ERR_PTR(-EINVAL);
1361		}
1362		src_nents = (src_nents == 1) ? 0 : src_nents;
1363		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1364		dst_nents = sg_nents_for_len(dst, dst_len);
1365		if (dst_nents < 0) {
1366			dev_err(dev, "Invalid number of dst SG.\n");
1367			return ERR_PTR(-EINVAL);
1368		}
1369		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1370	}
1371
1372	/*
1373	 * allocate space for base edesc plus the link tables,
1374	 * allowing for two separate entries for AD and generated ICV (+ 2),
1375	 * and space for two sets of ICVs (stashed and generated)
1376	 */
1377	alloc_len = sizeof(struct talitos_edesc);
1378	if (src_nents || dst_nents || !encrypt) {
1379		if (is_sec1)
1380			dma_len = (src_nents ? src_len : 0) +
1381				  (dst_nents ? dst_len : 0) + authsize;
1382		else
1383			dma_len = (src_nents + dst_nents + 2) *
1384				  sizeof(struct talitos_ptr) + authsize;
1385		alloc_len += dma_len;
1386	} else {
1387		dma_len = 0;
1388	}
1389	alloc_len += icv_stashing ? authsize : 0;
1390
1391	/* if it's an ahash, add space for a second desc next to the first one */
1392	if (is_sec1 && !dst)
1393		alloc_len += sizeof(struct talitos_desc);
1394	alloc_len += ivsize;
1395
1396	edesc = kmalloc(ALIGN(alloc_len, dma_get_cache_alignment()), flags);
1397	if (!edesc)
1398		return ERR_PTR(-ENOMEM);
1399	if (ivsize) {
1400		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1401		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1402	}
1403	memset(&edesc->desc, 0, sizeof(edesc->desc));
1404
1405	edesc->src_nents = src_nents;
1406	edesc->dst_nents = dst_nents;
1407	edesc->iv_dma = iv_dma;
1408	edesc->dma_len = dma_len;
1409	if (dma_len)
1410		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1411						     edesc->dma_len,
1412						     DMA_BIDIRECTIONAL);
1413
1414	return edesc;
1415}
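/*
 * Illustrative layout of the allocation made above (SEC2+ AEAD case,
 * not part of the driver):
 *
 *	[ struct talitos_edesc | link tables + ICV (dma_len bytes) |
 *	  stashed ICV (authsize, if icv_stashing) | IV copy (ivsize) ]
 *
 * Only the dma_len region is mapped as edesc->dma_link_tbl; the IV copy
 * at the tail is mapped separately as edesc->iv_dma.
 */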
1416
1417static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1418					      int icv_stashing, bool encrypt)
1419{
1420	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1421	unsigned int authsize = crypto_aead_authsize(authenc);
1422	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1423	unsigned int ivsize = crypto_aead_ivsize(authenc);
1424	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1425
1426	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1427				   iv, areq->assoclen, cryptlen,
1428				   authsize, ivsize, icv_stashing,
1429				   areq->base.flags, encrypt);
1430}
1431
1432static int aead_encrypt(struct aead_request *req)
1433{
1434	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1435	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1436	struct talitos_edesc *edesc;
1437
1438	/* allocate extended descriptor */
1439	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1440	if (IS_ERR(edesc))
1441		return PTR_ERR(edesc);
1442
1443	/* set encrypt */
1444	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1445
1446	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1447}
1448
1449static int aead_decrypt(struct aead_request *req)
1450{
1451	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1452	unsigned int authsize = crypto_aead_authsize(authenc);
1453	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1454	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1455	struct talitos_edesc *edesc;
1456	void *icvdata;
1457
1458	/* allocate extended descriptor */
1459	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1460	if (IS_ERR(edesc))
1461		return PTR_ERR(edesc);
1462
1463	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1464	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1465	    ((!edesc->src_nents && !edesc->dst_nents) ||
1466	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1467
1468		/* decrypt and check the ICV */
1469		edesc->desc.hdr = ctx->desc_hdr_template |
1470				  DESC_HDR_DIR_INBOUND |
1471				  DESC_HDR_MODE1_MDEU_CICV;
1472
1473		/* reset integrity check result bits */
1474
1475		return ipsec_esp(edesc, req, false,
1476				 ipsec_esp_decrypt_hwauth_done);
1477	}
1478
1479	/* Have to check the ICV with software */
1480	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1481
1482	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1483	icvdata = edesc->buf + edesc->dma_len;
1484
1485	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1486			   req->assoclen + req->cryptlen - authsize);
1487
1488	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1489}
1490
1491static int skcipher_setkey(struct crypto_skcipher *cipher,
1492			     const u8 *key, unsigned int keylen)
1493{
1494	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1495	struct device *dev = ctx->dev;
1496
1497	if (ctx->keylen)
1498		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1499
1500	memcpy(&ctx->key, key, keylen);
1501	ctx->keylen = keylen;
1502
1503	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1504
1505	return 0;
1506}
1507
1508static int skcipher_des_setkey(struct crypto_skcipher *cipher,
1509				 const u8 *key, unsigned int keylen)
1510{
1511	return verify_skcipher_des_key(cipher, key) ?:
1512	       skcipher_setkey(cipher, key, keylen);
1513}
1514
1515static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
1516				  const u8 *key, unsigned int keylen)
1517{
1518	return verify_skcipher_des3_key(cipher, key) ?:
1519	       skcipher_setkey(cipher, key, keylen);
1520}
1521
1522static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
1523				  const u8 *key, unsigned int keylen)
1524{
1525	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1526	    keylen == AES_KEYSIZE_256)
1527		return skcipher_setkey(cipher, key, keylen);
1528
1529	return -EINVAL;
1530}
1531
1532static void common_nonsnoop_unmap(struct device *dev,
1533				  struct talitos_edesc *edesc,
1534				  struct skcipher_request *areq)
1535{
1536	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1537
1538	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
1539	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1540
1541	if (edesc->dma_len)
1542		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1543				 DMA_BIDIRECTIONAL);
1544}
1545
1546static void skcipher_done(struct device *dev,
1547			    struct talitos_desc *desc, void *context,
1548			    int err)
1549{
1550	struct skcipher_request *areq = context;
1551	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1552	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1553	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1554	struct talitos_edesc *edesc;
1555
1556	edesc = container_of(desc, struct talitos_edesc, desc);
1557
1558	common_nonsnoop_unmap(dev, edesc, areq);
1559	memcpy(areq->iv, ctx->iv, ivsize);
1560
1561	kfree(edesc);
1562
1563	skcipher_request_complete(areq, err);
1564}
1565
1566static int common_nonsnoop(struct talitos_edesc *edesc,
1567			   struct skcipher_request *areq,
1568			   void (*callback) (struct device *dev,
1569					     struct talitos_desc *desc,
1570					     void *context, int error))
1571{
1572	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1573	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1574	struct device *dev = ctx->dev;
1575	struct talitos_desc *desc = &edesc->desc;
1576	unsigned int cryptlen = areq->cryptlen;
1577	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1578	int sg_count, ret;
1579	bool sync_needed = false;
1580	struct talitos_private *priv = dev_get_drvdata(dev);
1581	bool is_sec1 = has_ftr_sec1(priv);
1582	bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
1583		      (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
1584
1585	/* first DWORD empty */
1586
1587	/* cipher iv */
1588	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1589
1590	/* cipher key */
1591	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1592
1593	sg_count = edesc->src_nents ?: 1;
1594	if (is_sec1 && sg_count > 1)
1595		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1596				  cryptlen);
1597	else
1598		sg_count = dma_map_sg(dev, areq->src, sg_count,
1599				      (areq->src == areq->dst) ?
1600				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1601	/*
1602	 * cipher in
1603	 */
1604	sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
1605				      sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
1606	if (sg_count > 1)
1607		sync_needed = true;
1608
1609	/* cipher out */
1610	if (areq->src != areq->dst) {
1611		sg_count = edesc->dst_nents ? : 1;
1612		if (!is_sec1 || sg_count == 1)
1613			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1614	}
1615
1616	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1617			     sg_count, 0, (edesc->src_nents + 1));
1618	if (ret > 1)
1619		sync_needed = true;
1620
1621	/* iv out */
1622	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1623			       DMA_FROM_DEVICE);
1624
1625	/* last DWORD empty */
1626
1627	if (sync_needed)
1628		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1629					   edesc->dma_len, DMA_BIDIRECTIONAL);
1630
1631	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1632	if (ret != -EINPROGRESS) {
1633		common_nonsnoop_unmap(dev, edesc, areq);
1634		kfree(edesc);
1635	}
1636	return ret;
1637}
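/*
 * Illustrative pointer map built by common_nonsnoop() above:
 *
 *	ptr[0] empty    ptr[3] cipher in    ptr[6] empty
 *	ptr[1] IV in    ptr[4] cipher out
 *	ptr[2] key      ptr[5] IV out
 */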
1638
1639static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
1640						    areq, bool encrypt)
1641{
1642	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1643	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1644	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1645
1646	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1647				   areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
1648				   areq->base.flags, encrypt);
1649}
1650
1651static int skcipher_encrypt(struct skcipher_request *areq)
1652{
1653	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1654	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1655	struct talitos_edesc *edesc;
1656	unsigned int blocksize =
1657			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1658
1659	if (!areq->cryptlen)
1660		return 0;
1661
1662	if (areq->cryptlen % blocksize)
1663		return -EINVAL;
1664
1665	/* allocate extended descriptor */
1666	edesc = skcipher_edesc_alloc(areq, true);
1667	if (IS_ERR(edesc))
1668		return PTR_ERR(edesc);
1669
1670	/* set encrypt */
1671	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1672
1673	return common_nonsnoop(edesc, areq, skcipher_done);
1674}
1675
1676static int skcipher_decrypt(struct skcipher_request *areq)
1677{
1678	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1679	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1680	struct talitos_edesc *edesc;
1681	unsigned int blocksize =
1682			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1683
1684	if (!areq->cryptlen)
1685		return 0;
1686
1687	if (areq->cryptlen % blocksize)
1688		return -EINVAL;
1689
1690	/* allocate extended descriptor */
1691	edesc = skcipher_edesc_alloc(areq, false);
1692	if (IS_ERR(edesc))
1693		return PTR_ERR(edesc);
1694
1695	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1696
1697	return common_nonsnoop(edesc, areq, skcipher_done);
1698}
1699
1700static void common_nonsnoop_hash_unmap(struct device *dev,
1701				       struct talitos_edesc *edesc,
1702				       struct ahash_request *areq)
1703{
1704	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1705	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1706	struct talitos_private *priv = dev_get_drvdata(dev);
1707	bool is_sec1 = has_ftr_sec1(priv);
1708	struct talitos_desc *desc = &edesc->desc;
1709	struct talitos_desc *desc2 = (struct talitos_desc *)
1710				     (edesc->buf + edesc->dma_len);
1711
1712	unmap_single_talitos_ptr(dev, &desc->ptr[5], DMA_FROM_DEVICE);
1713	if (desc->next_desc &&
1714	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
1715		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1716	if (req_ctx->last)
1717		memcpy(areq->result, req_ctx->hw_context,
1718		       crypto_ahash_digestsize(tfm));
1719
1720	if (req_ctx->psrc)
1721		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1722
1723	/* When using hashctx-in, must unmap it. */
1724	if (from_talitos_ptr_len(&desc->ptr[1], is_sec1))
1725		unmap_single_talitos_ptr(dev, &desc->ptr[1],
1726					 DMA_TO_DEVICE);
1727	else if (desc->next_desc)
1728		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1729					 DMA_TO_DEVICE);
1730
1731	if (is_sec1 && req_ctx->nbuf)
1732		unmap_single_talitos_ptr(dev, &desc->ptr[3],
1733					 DMA_TO_DEVICE);
1734
1735	if (edesc->dma_len)
1736		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1737				 DMA_BIDIRECTIONAL);
1738
1739	if (desc->next_desc)
1740		dma_unmap_single(dev, be32_to_cpu(desc->next_desc),
1741				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1742}
1743
1744static void ahash_done(struct device *dev,
1745		       struct talitos_desc *desc, void *context,
1746		       int err)
1747{
1748	struct ahash_request *areq = context;
1749	struct talitos_edesc *edesc =
1750		 container_of(desc, struct talitos_edesc, desc);
1751	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1752
1753	if (!req_ctx->last && req_ctx->to_hash_later) {
1754		/* Position any partial block for next update/final/finup */
1755		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1756		req_ctx->nbuf = req_ctx->to_hash_later;
1757	}
1758	common_nonsnoop_hash_unmap(dev, edesc, areq);
1759
1760	kfree(edesc);
1761
1762	ahash_request_complete(areq, err);
1763}
1764
1765/*
1766 * SEC1 doesn't like hashing a zero-sized message, so we do the padding
1767 * ourselves and submit a padded block
1768 */
1769static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1770			       struct talitos_edesc *edesc,
1771			       struct talitos_ptr *ptr)
1772{
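	/*
	 * Note: this is the padding of an empty message for the 64-byte
	 * block algorithms (MD5/SHA1/SHA256): a single 0x80 bit-marker
	 * byte, zero fill, and a 64-bit length field that is itself zero.
	 */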
1773	static u8 padded_hash[64] = {
1774		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1775		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1776		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1777		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1778	};
1779
1780	pr_err_once("Bug in SEC1, padding ourselves\n");
1781	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1782	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1783			       (char *)padded_hash, DMA_TO_DEVICE);
1784}
1785
1786static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1787				struct ahash_request *areq, unsigned int length,
1788				void (*callback) (struct device *dev,
1789						  struct talitos_desc *desc,
1790						  void *context, int error))
1791{
1792	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1793	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1794	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1795	struct device *dev = ctx->dev;
1796	struct talitos_desc *desc = &edesc->desc;
1797	int ret;
1798	bool sync_needed = false;
1799	struct talitos_private *priv = dev_get_drvdata(dev);
1800	bool is_sec1 = has_ftr_sec1(priv);
1801	int sg_count;
1802
1803	/* first DWORD empty */
1804
1805	/* hash context in */
1806	if (!req_ctx->first || req_ctx->swinit) {
1807		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1808					      req_ctx->hw_context_size,
1809					      req_ctx->hw_context,
1810					      DMA_TO_DEVICE);
1811		req_ctx->swinit = 0;
1812	}
1813	/* Indicate next op is not the first. */
1814	req_ctx->first = 0;
1815
1816	/* HMAC key */
1817	if (ctx->keylen)
1818		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1819			       is_sec1);
1820
1821	if (is_sec1 && req_ctx->nbuf)
1822		length -= req_ctx->nbuf;
1823
1824	sg_count = edesc->src_nents ?: 1;
1825	if (is_sec1 && sg_count > 1)
1826		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1827	else if (length)
1828		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1829				      DMA_TO_DEVICE);
1830	/*
1831	 * data in
1832	 */
1833	if (is_sec1 && req_ctx->nbuf) {
1834		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1835				       req_ctx->buf[req_ctx->buf_idx],
1836				       DMA_TO_DEVICE);
1837	} else {
1838		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1839					  &desc->ptr[3], sg_count, 0, 0);
1840		if (sg_count > 1)
1841			sync_needed = true;
1842	}
1843
1844	/* fifth DWORD empty */
1845
1846	/* hash/HMAC out -or- hash context out */
1847	if (req_ctx->last)
1848		map_single_talitos_ptr(dev, &desc->ptr[5],
1849				       crypto_ahash_digestsize(tfm),
1850				       req_ctx->hw_context, DMA_FROM_DEVICE);
1851	else
1852		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1853					      req_ctx->hw_context_size,
1854					      req_ctx->hw_context,
1855					      DMA_FROM_DEVICE);
1856
1857	/* last DWORD empty */
1858
1859	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1860		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1861
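	/*
	 * SEC1 with both buffered bytes and new data: chain two descriptors.
	 * desc hashes the buffered block (CONT, no pad, no done-notify);
	 * desc2, built in the link table area, continues over the
	 * scatterlist data and keeps the final pad/notify flags.
	 */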
1862	if (is_sec1 && req_ctx->nbuf && length) {
1863		struct talitos_desc *desc2 = (struct talitos_desc *)
1864					     (edesc->buf + edesc->dma_len);
1865		dma_addr_t next_desc;
1866
1867		memset(desc2, 0, sizeof(*desc2));
1868		desc2->hdr = desc->hdr;
1869		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1870		desc2->hdr1 = desc2->hdr;
1871		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1872		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1873		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1874
1875		if (desc->ptr[1].ptr)
1876			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1877					 is_sec1);
1878		else
1879			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1880						      req_ctx->hw_context_size,
1881						      req_ctx->hw_context,
1882						      DMA_TO_DEVICE);
1883		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1884		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1885					  &desc2->ptr[3], sg_count, 0, 0);
1886		if (sg_count > 1)
1887			sync_needed = true;
1888		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1889		if (req_ctx->last)
1890			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1891						      req_ctx->hw_context_size,
1892						      req_ctx->hw_context,
1893						      DMA_FROM_DEVICE);
1894
1895		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1896					   DMA_BIDIRECTIONAL);
1897		desc->next_desc = cpu_to_be32(next_desc);
1898	}
1899
1900	if (sync_needed)
1901		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1902					   edesc->dma_len, DMA_BIDIRECTIONAL);
1903
1904	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1905	if (ret != -EINPROGRESS) {
1906		common_nonsnoop_hash_unmap(dev, edesc, areq);
1907		kfree(edesc);
1908	}
1909	return ret;
1910}
1911
1912static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1913					       unsigned int nbytes)
1914{
1915	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1916	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1917	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1918	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1919	bool is_sec1 = has_ftr_sec1(priv);
1920
1921	if (is_sec1)
1922		nbytes -= req_ctx->nbuf;
1923
1924	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1925				   nbytes, 0, 0, 0, areq->base.flags, false);
1926}
1927
1928static int ahash_init(struct ahash_request *areq)
1929{
1930	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1931	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1932	struct device *dev = ctx->dev;
1933	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1934	unsigned int size;
1935	dma_addr_t dma;
1936
1937	/* Initialize the context */
1938	req_ctx->buf_idx = 0;
1939	req_ctx->nbuf = 0;
1940	req_ctx->first = 1; /* first indicates h/w must init its context */
1941	req_ctx->swinit = 0; /* assume h/w init of context */
1942	size =	(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1943			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1944			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1945	req_ctx->hw_context_size = size;
1946
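	/*
	 * The map/unmap pair below appears to serve only as a cache
	 * write-back of hw_context, so that the later "nosync" mappings
	 * of it find memory already coherent.
	 */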
1947	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1948			     DMA_TO_DEVICE);
1949	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1950
1951	return 0;
1952}
1953
1954/*
1955 * On h/w without explicit sha224 support, we initialize the h/w context
1956 * manually with sha224 constants and tell it to run sha256.
1957 */
1958static int ahash_init_sha224_swinit(struct ahash_request *areq)
1959{
1960	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1961
1962	req_ctx->hw_context[0] = SHA224_H0;
1963	req_ctx->hw_context[1] = SHA224_H1;
1964	req_ctx->hw_context[2] = SHA224_H2;
1965	req_ctx->hw_context[3] = SHA224_H3;
1966	req_ctx->hw_context[4] = SHA224_H4;
1967	req_ctx->hw_context[5] = SHA224_H5;
1968	req_ctx->hw_context[6] = SHA224_H6;
1969	req_ctx->hw_context[7] = SHA224_H7;
1970
1971	/* init 64-bit count */
1972	req_ctx->hw_context[8] = 0;
1973	req_ctx->hw_context[9] = 0;
1974
1975	ahash_init(areq);
1976	req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
1977
1978	return 0;
1979}
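/*
 * This works because sha224 uses sha256's compression function and
 * differs only in its initial hash values and in truncating the
 * output; with H0..H7 preloaded above, a sha256 run yields sha224.
 */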
1980
1981static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1982{
1983	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1984	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1985	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1986	struct talitos_edesc *edesc;
1987	unsigned int blocksize =
1988			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1989	unsigned int nbytes_to_hash;
1990	unsigned int to_hash_later;
1991	unsigned int nsg;
1992	int nents;
1993	struct device *dev = ctx->dev;
1994	struct talitos_private *priv = dev_get_drvdata(dev);
1995	bool is_sec1 = has_ftr_sec1(priv);
1996	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1997
1998	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1999		/* Buffer up to one whole block */
2000		nents = sg_nents_for_len(areq->src, nbytes);
2001		if (nents < 0) {
2002			dev_err(dev, "Invalid number of src SG.\n");
2003			return nents;
2004		}
2005		sg_copy_to_buffer(areq->src, nents,
2006				  ctx_buf + req_ctx->nbuf, nbytes);
2007		req_ctx->nbuf += nbytes;
2008		return 0;
2009	}
2010
2011	/* At least (blocksize + 1) bytes are available to hash */
2012	nbytes_to_hash = nbytes + req_ctx->nbuf;
2013	to_hash_later = nbytes_to_hash & (blocksize - 1);
2014
2015	if (req_ctx->last)
2016		to_hash_later = 0;
2017	else if (to_hash_later)
2018		/* There is a partial block. Hash the full block(s) now */
2019		nbytes_to_hash -= to_hash_later;
2020	else {
2021		/* Keep one block buffered */
2022		nbytes_to_hash -= blocksize;
2023		to_hash_later = blocksize;
2024	}
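	/*
	 * Worked example (not from the source): blocksize 64, nbuf 10,
	 * nbytes 120, !last: nbytes_to_hash = 130, to_hash_later =
	 * 130 & 63 = 2, so 128 bytes are hashed now and 2 are buffered.
	 */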
2025
2026	/* Chain in any previously buffered data */
2027	if (!is_sec1 && req_ctx->nbuf) {
2028		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2029		sg_init_table(req_ctx->bufsl, nsg);
2030		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2031		if (nsg > 1)
2032			sg_chain(req_ctx->bufsl, 2, areq->src);
2033		req_ctx->psrc = req_ctx->bufsl;
2034	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2035		int offset;
2036
2037		if (nbytes_to_hash > blocksize)
2038			offset = blocksize - req_ctx->nbuf;
2039		else
2040			offset = nbytes_to_hash - req_ctx->nbuf;
2041		nents = sg_nents_for_len(areq->src, offset);
2042		if (nents < 0) {
2043			dev_err(dev, "Invalid number of src SG.\n");
2044			return nents;
2045		}
2046		sg_copy_to_buffer(areq->src, nents,
2047				  ctx_buf + req_ctx->nbuf, offset);
2048		req_ctx->nbuf += offset;
2049		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2050						 offset);
2051	} else
2052		req_ctx->psrc = areq->src;
2053
2054	if (to_hash_later) {
2055		nents = sg_nents_for_len(areq->src, nbytes);
2056		if (nents < 0) {
2057			dev_err(dev, "Invalid number of src SG.\n");
2058			return nents;
2059		}
2060		sg_pcopy_to_buffer(areq->src, nents,
2061				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2062				   to_hash_later,
2063				   nbytes - to_hash_later);
2064	}
2065	req_ctx->to_hash_later = to_hash_later;
2066
2067	/* Allocate extended descriptor */
2068	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2069	if (IS_ERR(edesc))
2070		return PTR_ERR(edesc);
2071
2072	edesc->desc.hdr = ctx->desc_hdr_template;
2073
2074	/* On last one, request SEC to pad; otherwise continue */
2075	if (req_ctx->last)
2076		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2077	else
2078		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2079
2080	/* request SEC to INIT hash. */
2081	if (req_ctx->first && !req_ctx->swinit)
2082		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2083
2084	/* When the tfm context has a keylen, it's an HMAC.
2085	 * A first or last (i.e. not middle) descriptor must request HMAC.
2086	 */
2087	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2088		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2089
2090	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2091}
2092
2093static int ahash_update(struct ahash_request *areq)
2094{
2095	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2096
2097	req_ctx->last = 0;
2098
2099	return ahash_process_req(areq, areq->nbytes);
2100}
2101
2102static int ahash_final(struct ahash_request *areq)
2103{
2104	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2105
2106	req_ctx->last = 1;
2107
2108	return ahash_process_req(areq, 0);
2109}
2110
2111static int ahash_finup(struct ahash_request *areq)
2112{
2113	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2114
2115	req_ctx->last = 1;
2116
2117	return ahash_process_req(areq, areq->nbytes);
2118}
2119
2120static int ahash_digest(struct ahash_request *areq)
2121{
2122	ahash_init(areq);
2123	return ahash_finup(areq);
2124}
2125
2126static int ahash_digest_sha224_swinit(struct ahash_request *areq)
2127{
2128	ahash_init_sha224_swinit(areq);
2129	return ahash_finup(areq);
2130}
2131
2132static int ahash_export(struct ahash_request *areq, void *out)
2133{
2134	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2135	struct talitos_export_state *export = out;
2136	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2137	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2138	struct device *dev = ctx->dev;
2139	dma_addr_t dma;
2140
2141	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2142			     DMA_FROM_DEVICE);
2143	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2144
2145	memcpy(export->hw_context, req_ctx->hw_context,
2146	       req_ctx->hw_context_size);
2147	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2148	export->swinit = req_ctx->swinit;
2149	export->first = req_ctx->first;
2150	export->last = req_ctx->last;
2151	export->to_hash_later = req_ctx->to_hash_later;
2152	export->nbuf = req_ctx->nbuf;
2153
2154	return 0;
2155}
2156
2157static int ahash_import(struct ahash_request *areq, const void *in)
2158{
2159	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2160	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2161	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2162	struct device *dev = ctx->dev;
2163	const struct talitos_export_state *export = in;
2164	unsigned int size;
2165	dma_addr_t dma;
2166
2167	memset(req_ctx, 0, sizeof(*req_ctx));
2168	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2169			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2170			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2171	req_ctx->hw_context_size = size;
2172	memcpy(req_ctx->hw_context, export->hw_context, size);
2173	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2174	req_ctx->swinit = export->swinit;
2175	req_ctx->first = export->first;
2176	req_ctx->last = export->last;
2177	req_ctx->to_hash_later = export->to_hash_later;
2178	req_ctx->nbuf = export->nbuf;
2179
2180	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2181			     DMA_TO_DEVICE);
2182	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2183
2184	return 0;
2185}
2186
2187static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2188		   u8 *hash)
2189{
2190	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2191
2192	struct scatterlist sg[1];
2193	struct ahash_request *req;
2194	struct crypto_wait wait;
2195	int ret;
2196
2197	crypto_init_wait(&wait);
2198
2199	req = ahash_request_alloc(tfm, GFP_KERNEL);
2200	if (!req)
2201		return -ENOMEM;
2202
2203	/* Keep tfm keylen == 0 during hash of the long key */
2204	ctx->keylen = 0;
2205	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2206				   crypto_req_done, &wait);
2207
2208	sg_init_one(&sg[0], key, keylen);
2209
2210	ahash_request_set_crypt(req, sg, hash, keylen);
2211	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2212
2213	ahash_request_free(req);
2214
2215	return ret;
2216}
2217
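/*
 * HMAC key handling below follows RFC 2104: a key longer than the
 * block size is first hashed down to the digest size (via keyhash()
 * above); shorter keys are used as-is, with the ipad/opad expansion
 * presumably done by the MDEU itself.
 */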
2218static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2219			unsigned int keylen)
2220{
2221	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2222	struct device *dev = ctx->dev;
2223	unsigned int blocksize =
2224			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2225	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2226	unsigned int keysize = keylen;
2227	u8 hash[SHA512_DIGEST_SIZE];
2228	int ret;
2229
2230	if (keylen <= blocksize)
2231		memcpy(ctx->key, key, keysize);
2232	else {
2233		/* Must get the hash of the long key */
2234		ret = keyhash(tfm, key, keylen, hash);
2235
2236		if (ret)
2237			return -EINVAL;
2238
2239		keysize = digestsize;
2240		memcpy(ctx->key, hash, digestsize);
2241	}
2242
2243	if (ctx->keylen)
2244		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2245
2246	ctx->keylen = keysize;
2247	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2248
2249	return 0;
2250}
2251
2252
2253struct talitos_alg_template {
2254	u32 type;
2255	u32 priority;
2256	union {
2257		struct skcipher_alg skcipher;
2258		struct ahash_alg hash;
2259		struct aead_alg aead;
2260	} alg;
2261	__be32 desc_hdr_template;
2262};
2263
2264static struct talitos_alg_template driver_algs[] = {
2265	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2266	{	.type = CRYPTO_ALG_TYPE_AEAD,
2267		.alg.aead = {
2268			.base = {
2269				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2270				.cra_driver_name = "authenc-hmac-sha1-"
2271						   "cbc-aes-talitos",
2272				.cra_blocksize = AES_BLOCK_SIZE,
2273				.cra_flags = CRYPTO_ALG_ASYNC |
2274					     CRYPTO_ALG_ALLOCATES_MEMORY,
2275			},
2276			.ivsize = AES_BLOCK_SIZE,
2277			.maxauthsize = SHA1_DIGEST_SIZE,
2278		},
2279		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2280			             DESC_HDR_SEL0_AESU |
2281		                     DESC_HDR_MODE0_AESU_CBC |
2282		                     DESC_HDR_SEL1_MDEUA |
2283		                     DESC_HDR_MODE1_MDEU_INIT |
2284		                     DESC_HDR_MODE1_MDEU_PAD |
2285		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2286	},
2287	{	.type = CRYPTO_ALG_TYPE_AEAD,
2288		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2289		.alg.aead = {
2290			.base = {
2291				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2292				.cra_driver_name = "authenc-hmac-sha1-"
2293						   "cbc-aes-talitos-hsna",
2294				.cra_blocksize = AES_BLOCK_SIZE,
2295				.cra_flags = CRYPTO_ALG_ASYNC |
2296					     CRYPTO_ALG_ALLOCATES_MEMORY,
2297			},
2298			.ivsize = AES_BLOCK_SIZE,
2299			.maxauthsize = SHA1_DIGEST_SIZE,
2300		},
2301		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2302				     DESC_HDR_SEL0_AESU |
2303				     DESC_HDR_MODE0_AESU_CBC |
2304				     DESC_HDR_SEL1_MDEUA |
2305				     DESC_HDR_MODE1_MDEU_INIT |
2306				     DESC_HDR_MODE1_MDEU_PAD |
2307				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2308	},
2309	{	.type = CRYPTO_ALG_TYPE_AEAD,
2310		.alg.aead = {
2311			.base = {
2312				.cra_name = "authenc(hmac(sha1),"
2313					    "cbc(des3_ede))",
2314				.cra_driver_name = "authenc-hmac-sha1-"
2315						   "cbc-3des-talitos",
2316				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2317				.cra_flags = CRYPTO_ALG_ASYNC |
2318					     CRYPTO_ALG_ALLOCATES_MEMORY,
2319			},
2320			.ivsize = DES3_EDE_BLOCK_SIZE,
2321			.maxauthsize = SHA1_DIGEST_SIZE,
2322			.setkey = aead_des3_setkey,
2323		},
2324		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2325			             DESC_HDR_SEL0_DEU |
2326		                     DESC_HDR_MODE0_DEU_CBC |
2327		                     DESC_HDR_MODE0_DEU_3DES |
2328		                     DESC_HDR_SEL1_MDEUA |
2329		                     DESC_HDR_MODE1_MDEU_INIT |
2330		                     DESC_HDR_MODE1_MDEU_PAD |
2331		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2332	},
2333	{	.type = CRYPTO_ALG_TYPE_AEAD,
2334		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2335		.alg.aead = {
2336			.base = {
2337				.cra_name = "authenc(hmac(sha1),"
2338					    "cbc(des3_ede))",
2339				.cra_driver_name = "authenc-hmac-sha1-"
2340						   "cbc-3des-talitos-hsna",
2341				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2342				.cra_flags = CRYPTO_ALG_ASYNC |
2343					     CRYPTO_ALG_ALLOCATES_MEMORY,
2344			},
2345			.ivsize = DES3_EDE_BLOCK_SIZE,
2346			.maxauthsize = SHA1_DIGEST_SIZE,
2347			.setkey = aead_des3_setkey,
2348		},
2349		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2350				     DESC_HDR_SEL0_DEU |
2351				     DESC_HDR_MODE0_DEU_CBC |
2352				     DESC_HDR_MODE0_DEU_3DES |
2353				     DESC_HDR_SEL1_MDEUA |
2354				     DESC_HDR_MODE1_MDEU_INIT |
2355				     DESC_HDR_MODE1_MDEU_PAD |
2356				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2357	},
2358	{       .type = CRYPTO_ALG_TYPE_AEAD,
2359		.alg.aead = {
2360			.base = {
2361				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2362				.cra_driver_name = "authenc-hmac-sha224-"
2363						   "cbc-aes-talitos",
2364				.cra_blocksize = AES_BLOCK_SIZE,
2365				.cra_flags = CRYPTO_ALG_ASYNC |
2366					     CRYPTO_ALG_ALLOCATES_MEMORY,
2367			},
2368			.ivsize = AES_BLOCK_SIZE,
2369			.maxauthsize = SHA224_DIGEST_SIZE,
2370		},
2371		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2372				     DESC_HDR_SEL0_AESU |
2373				     DESC_HDR_MODE0_AESU_CBC |
2374				     DESC_HDR_SEL1_MDEUA |
2375				     DESC_HDR_MODE1_MDEU_INIT |
2376				     DESC_HDR_MODE1_MDEU_PAD |
2377				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2378	},
2379	{       .type = CRYPTO_ALG_TYPE_AEAD,
2380		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2381		.alg.aead = {
2382			.base = {
2383				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2384				.cra_driver_name = "authenc-hmac-sha224-"
2385						   "cbc-aes-talitos-hsna",
2386				.cra_blocksize = AES_BLOCK_SIZE,
2387				.cra_flags = CRYPTO_ALG_ASYNC |
2388					     CRYPTO_ALG_ALLOCATES_MEMORY,
2389			},
2390			.ivsize = AES_BLOCK_SIZE,
2391			.maxauthsize = SHA224_DIGEST_SIZE,
2392		},
2393		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2394				     DESC_HDR_SEL0_AESU |
2395				     DESC_HDR_MODE0_AESU_CBC |
2396				     DESC_HDR_SEL1_MDEUA |
2397				     DESC_HDR_MODE1_MDEU_INIT |
2398				     DESC_HDR_MODE1_MDEU_PAD |
2399				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2400	},
2401	{	.type = CRYPTO_ALG_TYPE_AEAD,
2402		.alg.aead = {
2403			.base = {
2404				.cra_name = "authenc(hmac(sha224),"
2405					    "cbc(des3_ede))",
2406				.cra_driver_name = "authenc-hmac-sha224-"
2407						   "cbc-3des-talitos",
2408				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2409				.cra_flags = CRYPTO_ALG_ASYNC |
2410					     CRYPTO_ALG_ALLOCATES_MEMORY,
2411			},
2412			.ivsize = DES3_EDE_BLOCK_SIZE,
2413			.maxauthsize = SHA224_DIGEST_SIZE,
2414			.setkey = aead_des3_setkey,
2415		},
2416		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2417			             DESC_HDR_SEL0_DEU |
2418		                     DESC_HDR_MODE0_DEU_CBC |
2419		                     DESC_HDR_MODE0_DEU_3DES |
2420		                     DESC_HDR_SEL1_MDEUA |
2421		                     DESC_HDR_MODE1_MDEU_INIT |
2422		                     DESC_HDR_MODE1_MDEU_PAD |
2423		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2424	},
2425	{	.type = CRYPTO_ALG_TYPE_AEAD,
2426		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2427		.alg.aead = {
2428			.base = {
2429				.cra_name = "authenc(hmac(sha224),"
2430					    "cbc(des3_ede))",
2431				.cra_driver_name = "authenc-hmac-sha224-"
2432						   "cbc-3des-talitos-hsna",
2433				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2434				.cra_flags = CRYPTO_ALG_ASYNC |
2435					     CRYPTO_ALG_ALLOCATES_MEMORY,
2436			},
2437			.ivsize = DES3_EDE_BLOCK_SIZE,
2438			.maxauthsize = SHA224_DIGEST_SIZE,
2439			.setkey = aead_des3_setkey,
2440		},
2441		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2442				     DESC_HDR_SEL0_DEU |
2443				     DESC_HDR_MODE0_DEU_CBC |
2444				     DESC_HDR_MODE0_DEU_3DES |
2445				     DESC_HDR_SEL1_MDEUA |
2446				     DESC_HDR_MODE1_MDEU_INIT |
2447				     DESC_HDR_MODE1_MDEU_PAD |
2448				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2449	},
2450	{	.type = CRYPTO_ALG_TYPE_AEAD,
2451		.alg.aead = {
2452			.base = {
2453				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2454				.cra_driver_name = "authenc-hmac-sha256-"
2455						   "cbc-aes-talitos",
2456				.cra_blocksize = AES_BLOCK_SIZE,
2457				.cra_flags = CRYPTO_ALG_ASYNC |
2458					     CRYPTO_ALG_ALLOCATES_MEMORY,
2459			},
2460			.ivsize = AES_BLOCK_SIZE,
2461			.maxauthsize = SHA256_DIGEST_SIZE,
2462		},
2463		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2464			             DESC_HDR_SEL0_AESU |
2465		                     DESC_HDR_MODE0_AESU_CBC |
2466		                     DESC_HDR_SEL1_MDEUA |
2467		                     DESC_HDR_MODE1_MDEU_INIT |
2468		                     DESC_HDR_MODE1_MDEU_PAD |
2469		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2470	},
2471	{	.type = CRYPTO_ALG_TYPE_AEAD,
2472		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2473		.alg.aead = {
2474			.base = {
2475				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2476				.cra_driver_name = "authenc-hmac-sha256-"
2477						   "cbc-aes-talitos-hsna",
2478				.cra_blocksize = AES_BLOCK_SIZE,
2479				.cra_flags = CRYPTO_ALG_ASYNC |
2480					     CRYPTO_ALG_ALLOCATES_MEMORY,
2481			},
2482			.ivsize = AES_BLOCK_SIZE,
2483			.maxauthsize = SHA256_DIGEST_SIZE,
2484		},
2485		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2486				     DESC_HDR_SEL0_AESU |
2487				     DESC_HDR_MODE0_AESU_CBC |
2488				     DESC_HDR_SEL1_MDEUA |
2489				     DESC_HDR_MODE1_MDEU_INIT |
2490				     DESC_HDR_MODE1_MDEU_PAD |
2491				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2492	},
2493	{	.type = CRYPTO_ALG_TYPE_AEAD,
2494		.alg.aead = {
2495			.base = {
2496				.cra_name = "authenc(hmac(sha256),"
2497					    "cbc(des3_ede))",
2498				.cra_driver_name = "authenc-hmac-sha256-"
2499						   "cbc-3des-talitos",
2500				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2501				.cra_flags = CRYPTO_ALG_ASYNC |
2502					     CRYPTO_ALG_ALLOCATES_MEMORY,
2503			},
2504			.ivsize = DES3_EDE_BLOCK_SIZE,
2505			.maxauthsize = SHA256_DIGEST_SIZE,
2506			.setkey = aead_des3_setkey,
2507		},
2508		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2509			             DESC_HDR_SEL0_DEU |
2510		                     DESC_HDR_MODE0_DEU_CBC |
2511		                     DESC_HDR_MODE0_DEU_3DES |
2512		                     DESC_HDR_SEL1_MDEUA |
2513		                     DESC_HDR_MODE1_MDEU_INIT |
2514		                     DESC_HDR_MODE1_MDEU_PAD |
2515		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2516	},
2517	{	.type = CRYPTO_ALG_TYPE_AEAD,
2518		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2519		.alg.aead = {
2520			.base = {
2521				.cra_name = "authenc(hmac(sha256),"
2522					    "cbc(des3_ede))",
2523				.cra_driver_name = "authenc-hmac-sha256-"
2524						   "cbc-3des-talitos-hsna",
2525				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2526				.cra_flags = CRYPTO_ALG_ASYNC |
2527					     CRYPTO_ALG_ALLOCATES_MEMORY,
2528			},
2529			.ivsize = DES3_EDE_BLOCK_SIZE,
2530			.maxauthsize = SHA256_DIGEST_SIZE,
2531			.setkey = aead_des3_setkey,
2532		},
2533		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2534				     DESC_HDR_SEL0_DEU |
2535				     DESC_HDR_MODE0_DEU_CBC |
2536				     DESC_HDR_MODE0_DEU_3DES |
2537				     DESC_HDR_SEL1_MDEUA |
2538				     DESC_HDR_MODE1_MDEU_INIT |
2539				     DESC_HDR_MODE1_MDEU_PAD |
2540				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2541	},
2542	{	.type = CRYPTO_ALG_TYPE_AEAD,
2543		.alg.aead = {
2544			.base = {
2545				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2546				.cra_driver_name = "authenc-hmac-sha384-"
2547						   "cbc-aes-talitos",
2548				.cra_blocksize = AES_BLOCK_SIZE,
2549				.cra_flags = CRYPTO_ALG_ASYNC |
2550					     CRYPTO_ALG_ALLOCATES_MEMORY,
2551			},
2552			.ivsize = AES_BLOCK_SIZE,
2553			.maxauthsize = SHA384_DIGEST_SIZE,
2554		},
2555		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2556			             DESC_HDR_SEL0_AESU |
2557		                     DESC_HDR_MODE0_AESU_CBC |
2558		                     DESC_HDR_SEL1_MDEUB |
2559		                     DESC_HDR_MODE1_MDEU_INIT |
2560		                     DESC_HDR_MODE1_MDEU_PAD |
2561		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2562	},
2563	{	.type = CRYPTO_ALG_TYPE_AEAD,
2564		.alg.aead = {
2565			.base = {
2566				.cra_name = "authenc(hmac(sha384),"
2567					    "cbc(des3_ede))",
2568				.cra_driver_name = "authenc-hmac-sha384-"
2569						   "cbc-3des-talitos",
2570				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2571				.cra_flags = CRYPTO_ALG_ASYNC |
2572					     CRYPTO_ALG_ALLOCATES_MEMORY,
2573			},
2574			.ivsize = DES3_EDE_BLOCK_SIZE,
2575			.maxauthsize = SHA384_DIGEST_SIZE,
2576			.setkey = aead_des3_setkey,
2577		},
2578		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2579			             DESC_HDR_SEL0_DEU |
2580		                     DESC_HDR_MODE0_DEU_CBC |
2581		                     DESC_HDR_MODE0_DEU_3DES |
2582		                     DESC_HDR_SEL1_MDEUB |
2583		                     DESC_HDR_MODE1_MDEU_INIT |
2584		                     DESC_HDR_MODE1_MDEU_PAD |
2585		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2586	},
2587	{	.type = CRYPTO_ALG_TYPE_AEAD,
2588		.alg.aead = {
2589			.base = {
2590				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2591				.cra_driver_name = "authenc-hmac-sha512-"
2592						   "cbc-aes-talitos",
2593				.cra_blocksize = AES_BLOCK_SIZE,
2594				.cra_flags = CRYPTO_ALG_ASYNC |
2595					     CRYPTO_ALG_ALLOCATES_MEMORY,
2596			},
2597			.ivsize = AES_BLOCK_SIZE,
2598			.maxauthsize = SHA512_DIGEST_SIZE,
2599		},
2600		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2601			             DESC_HDR_SEL0_AESU |
2602		                     DESC_HDR_MODE0_AESU_CBC |
2603		                     DESC_HDR_SEL1_MDEUB |
2604		                     DESC_HDR_MODE1_MDEU_INIT |
2605		                     DESC_HDR_MODE1_MDEU_PAD |
2606		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2607	},
2608	{	.type = CRYPTO_ALG_TYPE_AEAD,
2609		.alg.aead = {
2610			.base = {
2611				.cra_name = "authenc(hmac(sha512),"
2612					    "cbc(des3_ede))",
2613				.cra_driver_name = "authenc-hmac-sha512-"
2614						   "cbc-3des-talitos",
2615				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2616				.cra_flags = CRYPTO_ALG_ASYNC |
2617					     CRYPTO_ALG_ALLOCATES_MEMORY,
2618			},
2619			.ivsize = DES3_EDE_BLOCK_SIZE,
2620			.maxauthsize = SHA512_DIGEST_SIZE,
2621			.setkey = aead_des3_setkey,
2622		},
2623		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2624			             DESC_HDR_SEL0_DEU |
2625		                     DESC_HDR_MODE0_DEU_CBC |
2626		                     DESC_HDR_MODE0_DEU_3DES |
2627		                     DESC_HDR_SEL1_MDEUB |
2628		                     DESC_HDR_MODE1_MDEU_INIT |
2629		                     DESC_HDR_MODE1_MDEU_PAD |
2630		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2631	},
2632	{	.type = CRYPTO_ALG_TYPE_AEAD,
2633		.alg.aead = {
2634			.base = {
2635				.cra_name = "authenc(hmac(md5),cbc(aes))",
2636				.cra_driver_name = "authenc-hmac-md5-"
2637						   "cbc-aes-talitos",
2638				.cra_blocksize = AES_BLOCK_SIZE,
2639				.cra_flags = CRYPTO_ALG_ASYNC |
2640					     CRYPTO_ALG_ALLOCATES_MEMORY,
2641			},
2642			.ivsize = AES_BLOCK_SIZE,
2643			.maxauthsize = MD5_DIGEST_SIZE,
2644		},
2645		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2646			             DESC_HDR_SEL0_AESU |
2647		                     DESC_HDR_MODE0_AESU_CBC |
2648		                     DESC_HDR_SEL1_MDEUA |
2649		                     DESC_HDR_MODE1_MDEU_INIT |
2650		                     DESC_HDR_MODE1_MDEU_PAD |
2651		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2652	},
2653	{	.type = CRYPTO_ALG_TYPE_AEAD,
2654		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2655		.alg.aead = {
2656			.base = {
2657				.cra_name = "authenc(hmac(md5),cbc(aes))",
2658				.cra_driver_name = "authenc-hmac-md5-"
2659						   "cbc-aes-talitos-hsna",
2660				.cra_blocksize = AES_BLOCK_SIZE,
2661				.cra_flags = CRYPTO_ALG_ASYNC |
2662					     CRYPTO_ALG_ALLOCATES_MEMORY,
2663			},
2664			.ivsize = AES_BLOCK_SIZE,
2665			.maxauthsize = MD5_DIGEST_SIZE,
2666		},
2667		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2668				     DESC_HDR_SEL0_AESU |
2669				     DESC_HDR_MODE0_AESU_CBC |
2670				     DESC_HDR_SEL1_MDEUA |
2671				     DESC_HDR_MODE1_MDEU_INIT |
2672				     DESC_HDR_MODE1_MDEU_PAD |
2673				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2674	},
2675	{	.type = CRYPTO_ALG_TYPE_AEAD,
2676		.alg.aead = {
2677			.base = {
2678				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2679				.cra_driver_name = "authenc-hmac-md5-"
2680						   "cbc-3des-talitos",
2681				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2682				.cra_flags = CRYPTO_ALG_ASYNC |
2683					     CRYPTO_ALG_ALLOCATES_MEMORY,
2684			},
2685			.ivsize = DES3_EDE_BLOCK_SIZE,
2686			.maxauthsize = MD5_DIGEST_SIZE,
2687			.setkey = aead_des3_setkey,
2688		},
2689		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2690			             DESC_HDR_SEL0_DEU |
2691		                     DESC_HDR_MODE0_DEU_CBC |
2692		                     DESC_HDR_MODE0_DEU_3DES |
2693		                     DESC_HDR_SEL1_MDEUA |
2694		                     DESC_HDR_MODE1_MDEU_INIT |
2695		                     DESC_HDR_MODE1_MDEU_PAD |
2696		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2697	},
2698	{	.type = CRYPTO_ALG_TYPE_AEAD,
2699		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2700		.alg.aead = {
2701			.base = {
2702				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2703				.cra_driver_name = "authenc-hmac-md5-"
2704						   "cbc-3des-talitos-hsna",
2705				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2706				.cra_flags = CRYPTO_ALG_ASYNC |
2707					     CRYPTO_ALG_ALLOCATES_MEMORY,
2708			},
2709			.ivsize = DES3_EDE_BLOCK_SIZE,
2710			.maxauthsize = MD5_DIGEST_SIZE,
2711			.setkey = aead_des3_setkey,
2712		},
2713		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2714				     DESC_HDR_SEL0_DEU |
2715				     DESC_HDR_MODE0_DEU_CBC |
2716				     DESC_HDR_MODE0_DEU_3DES |
2717				     DESC_HDR_SEL1_MDEUA |
2718				     DESC_HDR_MODE1_MDEU_INIT |
2719				     DESC_HDR_MODE1_MDEU_PAD |
2720				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2721	},
2722	/* SKCIPHER algorithms. */
2723	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2724		.alg.skcipher = {
2725			.base.cra_name = "ecb(aes)",
2726			.base.cra_driver_name = "ecb-aes-talitos",
2727			.base.cra_blocksize = AES_BLOCK_SIZE,
2728			.base.cra_flags = CRYPTO_ALG_ASYNC |
2729					  CRYPTO_ALG_ALLOCATES_MEMORY,
2730			.min_keysize = AES_MIN_KEY_SIZE,
2731			.max_keysize = AES_MAX_KEY_SIZE,
2732			.setkey = skcipher_aes_setkey,
2733		},
2734		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2735				     DESC_HDR_SEL0_AESU,
2736	},
2737	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2738		.alg.skcipher = {
2739			.base.cra_name = "cbc(aes)",
2740			.base.cra_driver_name = "cbc-aes-talitos",
2741			.base.cra_blocksize = AES_BLOCK_SIZE,
2742			.base.cra_flags = CRYPTO_ALG_ASYNC |
2743					  CRYPTO_ALG_ALLOCATES_MEMORY,
2744			.min_keysize = AES_MIN_KEY_SIZE,
2745			.max_keysize = AES_MAX_KEY_SIZE,
2746			.ivsize = AES_BLOCK_SIZE,
2747			.setkey = skcipher_aes_setkey,
2748		},
2749		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2750				     DESC_HDR_SEL0_AESU |
2751				     DESC_HDR_MODE0_AESU_CBC,
2752	},
2753	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2754		.alg.skcipher = {
2755			.base.cra_name = "ctr(aes)",
2756			.base.cra_driver_name = "ctr-aes-talitos",
2757			.base.cra_blocksize = 1,
2758			.base.cra_flags = CRYPTO_ALG_ASYNC |
2759					  CRYPTO_ALG_ALLOCATES_MEMORY,
2760			.min_keysize = AES_MIN_KEY_SIZE,
2761			.max_keysize = AES_MAX_KEY_SIZE,
2762			.ivsize = AES_BLOCK_SIZE,
2763			.setkey = skcipher_aes_setkey,
2764		},
2765		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2766				     DESC_HDR_SEL0_AESU |
2767				     DESC_HDR_MODE0_AESU_CTR,
2768	},
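	/*
	 * Fallback ctr(aes) entry using the plain non-snooping descriptor
	 * type; only one of the two ctr(aes) templates survives the
	 * SEC1/descriptor-type checks done at registration time.
	 */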
2769	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2770		.alg.skcipher = {
2771			.base.cra_name = "ctr(aes)",
2772			.base.cra_driver_name = "ctr-aes-talitos",
2773			.base.cra_blocksize = 1,
2774			.base.cra_flags = CRYPTO_ALG_ASYNC |
2775					  CRYPTO_ALG_ALLOCATES_MEMORY,
2776			.min_keysize = AES_MIN_KEY_SIZE,
2777			.max_keysize = AES_MAX_KEY_SIZE,
2778			.ivsize = AES_BLOCK_SIZE,
2779			.setkey = skcipher_aes_setkey,
2780		},
2781		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2782				     DESC_HDR_SEL0_AESU |
2783				     DESC_HDR_MODE0_AESU_CTR,
2784	},
2785	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2786		.alg.skcipher = {
2787			.base.cra_name = "ecb(des)",
2788			.base.cra_driver_name = "ecb-des-talitos",
2789			.base.cra_blocksize = DES_BLOCK_SIZE,
2790			.base.cra_flags = CRYPTO_ALG_ASYNC |
2791					  CRYPTO_ALG_ALLOCATES_MEMORY,
2792			.min_keysize = DES_KEY_SIZE,
2793			.max_keysize = DES_KEY_SIZE,
2794			.setkey = skcipher_des_setkey,
2795		},
2796		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2797				     DESC_HDR_SEL0_DEU,
2798	},
2799	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2800		.alg.skcipher = {
2801			.base.cra_name = "cbc(des)",
2802			.base.cra_driver_name = "cbc-des-talitos",
2803			.base.cra_blocksize = DES_BLOCK_SIZE,
2804			.base.cra_flags = CRYPTO_ALG_ASYNC |
2805					  CRYPTO_ALG_ALLOCATES_MEMORY,
2806			.min_keysize = DES_KEY_SIZE,
2807			.max_keysize = DES_KEY_SIZE,
2808			.ivsize = DES_BLOCK_SIZE,
2809			.setkey = skcipher_des_setkey,
2810		},
2811		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2812				     DESC_HDR_SEL0_DEU |
2813				     DESC_HDR_MODE0_DEU_CBC,
2814	},
2815	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2816		.alg.skcipher = {
2817			.base.cra_name = "ecb(des3_ede)",
2818			.base.cra_driver_name = "ecb-3des-talitos",
2819			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2820			.base.cra_flags = CRYPTO_ALG_ASYNC |
2821					  CRYPTO_ALG_ALLOCATES_MEMORY,
2822			.min_keysize = DES3_EDE_KEY_SIZE,
2823			.max_keysize = DES3_EDE_KEY_SIZE,
2824			.setkey = skcipher_des3_setkey,
2825		},
2826		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2827				     DESC_HDR_SEL0_DEU |
2828				     DESC_HDR_MODE0_DEU_3DES,
2829	},
2830	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2831		.alg.skcipher = {
2832			.base.cra_name = "cbc(des3_ede)",
2833			.base.cra_driver_name = "cbc-3des-talitos",
2834			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2835			.base.cra_flags = CRYPTO_ALG_ASYNC |
2836					  CRYPTO_ALG_ALLOCATES_MEMORY,
2837			.min_keysize = DES3_EDE_KEY_SIZE,
2838			.max_keysize = DES3_EDE_KEY_SIZE,
2839			.ivsize = DES3_EDE_BLOCK_SIZE,
2840			.setkey = skcipher_des3_setkey,
2841		},
2842		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2843			             DESC_HDR_SEL0_DEU |
2844		                     DESC_HDR_MODE0_DEU_CBC |
2845		                     DESC_HDR_MODE0_DEU_3DES,
2846	},
2847	/* AHASH algorithms. */
2848	{	.type = CRYPTO_ALG_TYPE_AHASH,
2849		.alg.hash = {
2850			.halg.digestsize = MD5_DIGEST_SIZE,
2851			.halg.statesize = sizeof(struct talitos_export_state),
2852			.halg.base = {
2853				.cra_name = "md5",
2854				.cra_driver_name = "md5-talitos",
2855				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2856				.cra_flags = CRYPTO_ALG_ASYNC |
2857					     CRYPTO_ALG_ALLOCATES_MEMORY,
2858			}
2859		},
2860		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2861				     DESC_HDR_SEL0_MDEUA |
2862				     DESC_HDR_MODE0_MDEU_MD5,
2863	},
2864	{	.type = CRYPTO_ALG_TYPE_AHASH,
2865		.alg.hash = {
2866			.halg.digestsize = SHA1_DIGEST_SIZE,
2867			.halg.statesize = sizeof(struct talitos_export_state),
2868			.halg.base = {
2869				.cra_name = "sha1",
2870				.cra_driver_name = "sha1-talitos",
2871				.cra_blocksize = SHA1_BLOCK_SIZE,
2872				.cra_flags = CRYPTO_ALG_ASYNC |
2873					     CRYPTO_ALG_ALLOCATES_MEMORY,
2874			}
2875		},
2876		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2877				     DESC_HDR_SEL0_MDEUA |
2878				     DESC_HDR_MODE0_MDEU_SHA1,
2879	},
2880	{	.type = CRYPTO_ALG_TYPE_AHASH,
2881		.alg.hash = {
2882			.halg.digestsize = SHA224_DIGEST_SIZE,
2883			.halg.statesize = sizeof(struct talitos_export_state),
2884			.halg.base = {
2885				.cra_name = "sha224",
2886				.cra_driver_name = "sha224-talitos",
2887				.cra_blocksize = SHA224_BLOCK_SIZE,
2888				.cra_flags = CRYPTO_ALG_ASYNC |
2889					     CRYPTO_ALG_ALLOCATES_MEMORY,
2890			}
2891		},
2892		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2893				     DESC_HDR_SEL0_MDEUA |
2894				     DESC_HDR_MODE0_MDEU_SHA224,
2895	},
2896	{	.type = CRYPTO_ALG_TYPE_AHASH,
2897		.alg.hash = {
2898			.halg.digestsize = SHA256_DIGEST_SIZE,
2899			.halg.statesize = sizeof(struct talitos_export_state),
2900			.halg.base = {
2901				.cra_name = "sha256",
2902				.cra_driver_name = "sha256-talitos",
2903				.cra_blocksize = SHA256_BLOCK_SIZE,
2904				.cra_flags = CRYPTO_ALG_ASYNC |
2905					     CRYPTO_ALG_ALLOCATES_MEMORY,
2906			}
2907		},
2908		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2909				     DESC_HDR_SEL0_MDEUA |
2910				     DESC_HDR_MODE0_MDEU_SHA256,
2911	},
2912	{	.type = CRYPTO_ALG_TYPE_AHASH,
2913		.alg.hash = {
2914			.halg.digestsize = SHA384_DIGEST_SIZE,
2915			.halg.statesize = sizeof(struct talitos_export_state),
2916			.halg.base = {
2917				.cra_name = "sha384",
2918				.cra_driver_name = "sha384-talitos",
2919				.cra_blocksize = SHA384_BLOCK_SIZE,
2920				.cra_flags = CRYPTO_ALG_ASYNC |
2921					     CRYPTO_ALG_ALLOCATES_MEMORY,
2922			}
2923		},
2924		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2925				     DESC_HDR_SEL0_MDEUB |
2926				     DESC_HDR_MODE0_MDEUB_SHA384,
2927	},
2928	{	.type = CRYPTO_ALG_TYPE_AHASH,
2929		.alg.hash = {
2930			.halg.digestsize = SHA512_DIGEST_SIZE,
2931			.halg.statesize = sizeof(struct talitos_export_state),
2932			.halg.base = {
2933				.cra_name = "sha512",
2934				.cra_driver_name = "sha512-talitos",
2935				.cra_blocksize = SHA512_BLOCK_SIZE,
2936				.cra_flags = CRYPTO_ALG_ASYNC |
2937					     CRYPTO_ALG_ALLOCATES_MEMORY,
2938			}
2939		},
2940		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2941				     DESC_HDR_SEL0_MDEUB |
2942				     DESC_HDR_MODE0_MDEUB_SHA512,
2943	},
2944	{	.type = CRYPTO_ALG_TYPE_AHASH,
2945		.alg.hash = {
2946			.halg.digestsize = MD5_DIGEST_SIZE,
2947			.halg.statesize = sizeof(struct talitos_export_state),
2948			.halg.base = {
2949				.cra_name = "hmac(md5)",
2950				.cra_driver_name = "hmac-md5-talitos",
2951				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2952				.cra_flags = CRYPTO_ALG_ASYNC |
2953					     CRYPTO_ALG_ALLOCATES_MEMORY,
2954			}
2955		},
2956		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2957				     DESC_HDR_SEL0_MDEUA |
2958				     DESC_HDR_MODE0_MDEU_MD5,
2959	},
2960	{	.type = CRYPTO_ALG_TYPE_AHASH,
2961		.alg.hash = {
2962			.halg.digestsize = SHA1_DIGEST_SIZE,
2963			.halg.statesize = sizeof(struct talitos_export_state),
2964			.halg.base = {
2965				.cra_name = "hmac(sha1)",
2966				.cra_driver_name = "hmac-sha1-talitos",
2967				.cra_blocksize = SHA1_BLOCK_SIZE,
2968				.cra_flags = CRYPTO_ALG_ASYNC |
2969					     CRYPTO_ALG_ALLOCATES_MEMORY,
2970			}
2971		},
2972		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2973				     DESC_HDR_SEL0_MDEUA |
2974				     DESC_HDR_MODE0_MDEU_SHA1,
2975	},
2976	{	.type = CRYPTO_ALG_TYPE_AHASH,
2977		.alg.hash = {
2978			.halg.digestsize = SHA224_DIGEST_SIZE,
2979			.halg.statesize = sizeof(struct talitos_export_state),
2980			.halg.base = {
2981				.cra_name = "hmac(sha224)",
2982				.cra_driver_name = "hmac-sha224-talitos",
2983				.cra_blocksize = SHA224_BLOCK_SIZE,
2984				.cra_flags = CRYPTO_ALG_ASYNC |
2985					     CRYPTO_ALG_ALLOCATES_MEMORY,
2986			}
2987		},
2988		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2989				     DESC_HDR_SEL0_MDEUA |
2990				     DESC_HDR_MODE0_MDEU_SHA224,
2991	},
2992	{	.type = CRYPTO_ALG_TYPE_AHASH,
2993		.alg.hash = {
2994			.halg.digestsize = SHA256_DIGEST_SIZE,
2995			.halg.statesize = sizeof(struct talitos_export_state),
2996			.halg.base = {
2997				.cra_name = "hmac(sha256)",
2998				.cra_driver_name = "hmac-sha256-talitos",
2999				.cra_blocksize = SHA256_BLOCK_SIZE,
3000				.cra_flags = CRYPTO_ALG_ASYNC |
3001					     CRYPTO_ALG_ALLOCATES_MEMORY,
3002			}
3003		},
3004		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3005				     DESC_HDR_SEL0_MDEUA |
3006				     DESC_HDR_MODE0_MDEU_SHA256,
3007	},
3008	{	.type = CRYPTO_ALG_TYPE_AHASH,
3009		.alg.hash = {
3010			.halg.digestsize = SHA384_DIGEST_SIZE,
3011			.halg.statesize = sizeof(struct talitos_export_state),
3012			.halg.base = {
3013				.cra_name = "hmac(sha384)",
3014				.cra_driver_name = "hmac-sha384-talitos",
3015				.cra_blocksize = SHA384_BLOCK_SIZE,
3016				.cra_flags = CRYPTO_ALG_ASYNC |
3017					     CRYPTO_ALG_ALLOCATES_MEMORY,
3018			}
3019		},
3020		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3021				     DESC_HDR_SEL0_MDEUB |
3022				     DESC_HDR_MODE0_MDEUB_SHA384,
3023	},
3024	{	.type = CRYPTO_ALG_TYPE_AHASH,
3025		.alg.hash = {
3026			.halg.digestsize = SHA512_DIGEST_SIZE,
3027			.halg.statesize = sizeof(struct talitos_export_state),
3028			.halg.base = {
3029				.cra_name = "hmac(sha512)",
3030				.cra_driver_name = "hmac-sha512-talitos",
3031				.cra_blocksize = SHA512_BLOCK_SIZE,
3032				.cra_flags = CRYPTO_ALG_ASYNC |
3033					     CRYPTO_ALG_ALLOCATES_MEMORY,
3034			}
3035		},
3036		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3037				     DESC_HDR_SEL0_MDEUB |
3038				     DESC_HDR_MODE0_MDEUB_SHA512,
3039	}
3040};
3041
3042struct talitos_crypto_alg {
3043	struct list_head entry;
3044	struct device *dev;
3045	struct talitos_alg_template algt;
3046};
3047
3048static int talitos_init_common(struct talitos_ctx *ctx,
3049			       struct talitos_crypto_alg *talitos_alg)
3050{
3051	struct talitos_private *priv;
3052
3053	/* update context with ptr to dev */
3054	ctx->dev = talitos_alg->dev;
3055
3056	/* assign SEC channel to tfm in round-robin fashion */
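	/* the mask works because talitos_probe() enforces a power-of-2
	 * channel count via is_power_of_2()
	 */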
3057	priv = dev_get_drvdata(ctx->dev);
3058	ctx->ch = atomic_inc_return(&priv->last_chan) &
3059		  (priv->num_channels - 1);
3060
3061	/* copy descriptor header template value */
3062	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3063
3064	/* select done notification */
3065	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3066
3067	return 0;
3068}
3069
3070static int talitos_cra_init_aead(struct crypto_aead *tfm)
3071{
3072	struct aead_alg *alg = crypto_aead_alg(tfm);
3073	struct talitos_crypto_alg *talitos_alg;
3074	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3075
3076	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3077				   algt.alg.aead);
3078
3079	return talitos_init_common(ctx, talitos_alg);
3080}
3081
3082static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
3083{
3084	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3085	struct talitos_crypto_alg *talitos_alg;
3086	struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
3087
3088	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3089				   algt.alg.skcipher);
3090
3091	return talitos_init_common(ctx, talitos_alg);
3092}
3093
3094static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3095{
3096	struct crypto_alg *alg = tfm->__crt_alg;
3097	struct talitos_crypto_alg *talitos_alg;
3098	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3099
3100	talitos_alg = container_of(__crypto_ahash_alg(alg),
3101				   struct talitos_crypto_alg,
3102				   algt.alg.hash);
3103
3104	ctx->keylen = 0;
3105	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3106				 sizeof(struct talitos_ahash_req_ctx));
3107
3108	return talitos_init_common(ctx, talitos_alg);
3109}
3110
3111static void talitos_cra_exit(struct crypto_tfm *tfm)
3112{
3113	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3114	struct device *dev = ctx->dev;
3115
3116	if (ctx->keylen)
3117		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3118}
3119
3120/*
3121 * Given the alg's descriptor header template, determine whether the
3122 * descriptor type and required primary/secondary execution units match
3123 * the hw capabilities described in the device tree node.
3124 */
3125static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3126{
3127	struct talitos_private *priv = dev_get_drvdata(dev);
3128	int ret;
3129
3130	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3131	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3132
3133	if (SECONDARY_EU(desc_hdr_template))
3134		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3135		              & priv->exec_units);
3136
3137	return ret;
3138}
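/*
 * e.g. an IPSEC_ESP template selecting AESU as primary and MDEUA as
 * secondary EU registers only if the corresponding bits are set in the
 * node's "fsl,descriptor-types-mask" and "fsl,exec-units-mask".
 */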
3139
3140static void talitos_remove(struct platform_device *ofdev)
3141{
3142	struct device *dev = &ofdev->dev;
3143	struct talitos_private *priv = dev_get_drvdata(dev);
3144	struct talitos_crypto_alg *t_alg, *n;
3145	int i;
3146
3147	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3148		switch (t_alg->algt.type) {
3149		case CRYPTO_ALG_TYPE_SKCIPHER:
3150			crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
3151			break;
3152		case CRYPTO_ALG_TYPE_AEAD:
3153			crypto_unregister_aead(&t_alg->algt.alg.aead);
3154			break;
3155		case CRYPTO_ALG_TYPE_AHASH:
3156			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3157			break;
3158		}
3159		list_del(&t_alg->entry);
3160	}
3161
3162	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3163		talitos_unregister_rng(dev);
3164
3165	for (i = 0; i < 2; i++)
3166		if (priv->irq[i]) {
3167			free_irq(priv->irq[i], dev);
3168			irq_dispose_mapping(priv->irq[i]);
3169		}
3170
3171	tasklet_kill(&priv->done_task[0]);
3172	if (priv->irq[1])
3173		tasklet_kill(&priv->done_task[1]);
3174}
3175
3176static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3177						    struct talitos_alg_template
3178						           *template)
3179{
3180	struct talitos_private *priv = dev_get_drvdata(dev);
3181	struct talitos_crypto_alg *t_alg;
3182	struct crypto_alg *alg;
3183
3184	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3185			     GFP_KERNEL);
3186	if (!t_alg)
3187		return ERR_PTR(-ENOMEM);
3188
3189	t_alg->algt = *template;
3190
3191	switch (t_alg->algt.type) {
3192	case CRYPTO_ALG_TYPE_SKCIPHER:
3193		alg = &t_alg->algt.alg.skcipher.base;
3194		alg->cra_exit = talitos_cra_exit;
3195		t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
3196		t_alg->algt.alg.skcipher.setkey =
3197			t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
3198		t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
3199		t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
3200		if (!strcmp(alg->cra_name, "ctr(aes)") && !has_ftr_sec1(priv) &&
3201		    DESC_TYPE(t_alg->algt.desc_hdr_template) !=
3202		    DESC_TYPE(DESC_HDR_TYPE_AESU_CTR_NONSNOOP)) {
3203			devm_kfree(dev, t_alg);
3204			return ERR_PTR(-ENOTSUPP);
3205		}
3206		break;
3207	case CRYPTO_ALG_TYPE_AEAD:
3208		alg = &t_alg->algt.alg.aead.base;
3209		alg->cra_exit = talitos_cra_exit;
3210		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3211		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3212					      aead_setkey;
3213		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3214		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3215		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3216		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3217			devm_kfree(dev, t_alg);
3218			return ERR_PTR(-ENOTSUPP);
3219		}
3220		break;
3221	case CRYPTO_ALG_TYPE_AHASH:
3222		alg = &t_alg->algt.alg.hash.halg.base;
3223		alg->cra_init = talitos_cra_init_ahash;
3224		alg->cra_exit = talitos_cra_exit;
3225		t_alg->algt.alg.hash.init = ahash_init;
3226		t_alg->algt.alg.hash.update = ahash_update;
3227		t_alg->algt.alg.hash.final = ahash_final;
3228		t_alg->algt.alg.hash.finup = ahash_finup;
3229		t_alg->algt.alg.hash.digest = ahash_digest;
3230		if (!strncmp(alg->cra_name, "hmac", 4))
3231			t_alg->algt.alg.hash.setkey = ahash_setkey;
3232		t_alg->algt.alg.hash.import = ahash_import;
3233		t_alg->algt.alg.hash.export = ahash_export;
3234
3235		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3236		    !strncmp(alg->cra_name, "hmac", 4)) {
3237			devm_kfree(dev, t_alg);
3238			return ERR_PTR(-ENOTSUPP);
3239		}
3240		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3241		    (!strcmp(alg->cra_name, "sha224") ||
3242		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3243			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3244			t_alg->algt.alg.hash.digest =
3245				ahash_digest_sha224_swinit;
3246			t_alg->algt.desc_hdr_template =
3247					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3248					DESC_HDR_SEL0_MDEUA |
3249					DESC_HDR_MODE0_MDEU_SHA256;
3250		}
3251		break;
3252	default:
3253		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3254		devm_kfree(dev, t_alg);
3255		return ERR_PTR(-EINVAL);
3256	}
3257
3258	alg->cra_module = THIS_MODULE;
3259	if (t_alg->algt.priority)
3260		alg->cra_priority = t_alg->algt.priority;
3261	else
3262		alg->cra_priority = TALITOS_CRA_PRIORITY;
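	/* alignmask 3 = 4-byte alignment, apparently a SEC1 DMA constraint */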
3263	if (has_ftr_sec1(priv) && t_alg->algt.type != CRYPTO_ALG_TYPE_AHASH)
3264		alg->cra_alignmask = 3;
3265	else
3266		alg->cra_alignmask = 0;
3267	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3268	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3269
3270	t_alg->dev = dev;
3271
3272	return t_alg;
3273}
3274
3275static int talitos_probe_irq(struct platform_device *ofdev)
3276{
3277	struct device *dev = &ofdev->dev;
3278	struct device_node *np = ofdev->dev.of_node;
3279	struct talitos_private *priv = dev_get_drvdata(dev);
3280	int err;
3281	bool is_sec1 = has_ftr_sec1(priv);
3282
3283	priv->irq[0] = irq_of_parse_and_map(np, 0);
3284	if (!priv->irq[0]) {
3285		dev_err(dev, "failed to map irq\n");
3286		return -EINVAL;
3287	}
3288	if (is_sec1) {
3289		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3290				  dev_driver_string(dev), dev);
3291		goto primary_out;
3292	}
3293
3294	priv->irq[1] = irq_of_parse_and_map(np, 1);
3295
3296	/* get the primary irq line */
3297	if (!priv->irq[1]) {
3298		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3299				  dev_driver_string(dev), dev);
3300		goto primary_out;
3301	}
3302
3303	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3304			  dev_driver_string(dev), dev);
3305	if (err)
3306		goto primary_out;
3307
3308	/* get the secondary irq line */
3309	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3310			  dev_driver_string(dev), dev);
3311	if (err) {
3312		dev_err(dev, "failed to request secondary irq\n");
3313		irq_dispose_mapping(priv->irq[1]);
3314		priv->irq[1] = 0;
3315	}
3316
3317	return err;
3318
3319primary_out:
3320	if (err) {
3321		dev_err(dev, "failed to request primary irq\n");
3322		irq_dispose_mapping(priv->irq[0]);
3323		priv->irq[0] = 0;
3324	}
3325
3326	return err;
3327}
3328
3329static int talitos_probe(struct platform_device *ofdev)
3330{
3331	struct device *dev = &ofdev->dev;
3332	struct device_node *np = ofdev->dev.of_node;
3333	struct talitos_private *priv;
3334	int i, err;
3335	int stride;
3336	struct resource *res;
3337
3338	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3339	if (!priv)
3340		return -ENOMEM;
3341
3342	INIT_LIST_HEAD(&priv->alg_list);
3343
3344	dev_set_drvdata(dev, priv);
3345
3346	priv->ofdev = ofdev;
3347
3348	spin_lock_init(&priv->reg_lock);
3349
3350	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3351	if (!res)
3352		return -ENXIO;
3353	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3354	if (!priv->reg) {
3355		dev_err(dev, "failed to ioremap registers\n");
3356		err = -ENOMEM;
3357		goto err_out;
3358	}
3359
3360	/* get SEC version capabilities from device tree */
3361	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3362	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3363	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3364	of_property_read_u32(np, "fsl,descriptor-types-mask",
3365			     &priv->desc_types);
3366
3367	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3368	    !priv->exec_units || !priv->desc_types) {
3369		dev_err(dev, "invalid property data in device tree node\n");
3370		err = -EINVAL;
3371		goto err_out;
3372	}
3373
3374	if (of_device_is_compatible(np, "fsl,sec3.0"))
3375		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3376
3377	if (of_device_is_compatible(np, "fsl,sec2.1"))
3378		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3379				  TALITOS_FTR_SHA224_HWINIT |
3380				  TALITOS_FTR_HMAC_OK;
3381
3382	if (of_device_is_compatible(np, "fsl,sec1.0"))
3383		priv->features |= TALITOS_FTR_SEC1;
3384
3385	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3386		priv->reg_deu = priv->reg + TALITOS12_DEU;
3387		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3388		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3389		stride = TALITOS1_CH_STRIDE;
3390	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3391		priv->reg_deu = priv->reg + TALITOS10_DEU;
3392		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3393		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3394		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3395		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3396		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3397		stride = TALITOS1_CH_STRIDE;
3398	} else {
3399		priv->reg_deu = priv->reg + TALITOS2_DEU;
3400		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3401		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3402		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3403		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3404		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3405		priv->reg_keu = priv->reg + TALITOS2_KEU;
3406		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3407		stride = TALITOS2_CH_STRIDE;
3408	}
3409
3410	err = talitos_probe_irq(ofdev);
3411	if (err)
3412		goto err_out;
3413
3414	if (has_ftr_sec1(priv)) {
3415		if (priv->num_channels == 1)
3416			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3417				     (unsigned long)dev);
3418		else
3419			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3420				     (unsigned long)dev);
3421	} else {
3422		if (priv->irq[1]) {
3423			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3424				     (unsigned long)dev);
3425			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3426				     (unsigned long)dev);
3427		} else if (priv->num_channels == 1) {
3428			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3429				     (unsigned long)dev);
3430		} else {
3431			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3432				     (unsigned long)dev);
3433		}
3434	}
3435
3436	priv->chan = devm_kcalloc(dev,
3437				  priv->num_channels,
3438				  sizeof(struct talitos_channel),
3439				  GFP_KERNEL);
3440	if (!priv->chan) {
3441		dev_err(dev, "failed to allocate channel management space\n");
3442		err = -ENOMEM;
3443		goto err_out;
3444	}
3445
3446	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3447
3448	for (i = 0; i < priv->num_channels; i++) {
3449		priv->chan[i].reg = priv->reg + stride * (i + 1);
3450		if (!priv->irq[1] || !(i & 1))
3451			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3452
3453		spin_lock_init(&priv->chan[i].head_lock);
3454		spin_lock_init(&priv->chan[i].tail_lock);
3455
3456		priv->chan[i].fifo = devm_kcalloc(dev,
3457						priv->fifo_len,
3458						sizeof(struct talitos_request),
3459						GFP_KERNEL);
3460		if (!priv->chan[i].fifo) {
3461			dev_err(dev, "failed to allocate request fifo %d\n", i);
3462			err = -ENOMEM;
3463			goto err_out;
3464		}
3465
3466		atomic_set(&priv->chan[i].submit_count,
3467			   -(priv->chfifo_len - 1));
3468	}
3469
3470	dma_set_mask(dev, DMA_BIT_MASK(36));
3471
3472	/* reset and initialize the h/w */
3473	err = init_device(dev);
3474	if (err) {
3475		dev_err(dev, "failed to initialize device\n");
3476		goto err_out;
3477	}
3478
3479	/* register the RNG, if available */
3480	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3481		err = talitos_register_rng(dev);
3482		if (err) {
3483			dev_err(dev, "failed to register hwrng: %d\n", err);
3484			goto err_out;
3485		} else
3486			dev_info(dev, "hwrng\n");
3487	}
3488
3489	/* register crypto algorithms the device supports */
3490	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3491		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3492			struct talitos_crypto_alg *t_alg;
3493			struct crypto_alg *alg = NULL;
3494
3495			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3496			if (IS_ERR(t_alg)) {
3497				err = PTR_ERR(t_alg);
3498				if (err == -ENOTSUPP)
3499					continue;
3500				goto err_out;
3501			}
3502
3503			switch (t_alg->algt.type) {
3504			case CRYPTO_ALG_TYPE_SKCIPHER:
3505				err = crypto_register_skcipher(
3506						&t_alg->algt.alg.skcipher);
3507				alg = &t_alg->algt.alg.skcipher.base;
3508				break;
3509
3510			case CRYPTO_ALG_TYPE_AEAD:
3511				err = crypto_register_aead(
3512					&t_alg->algt.alg.aead);
3513				alg = &t_alg->algt.alg.aead.base;
3514				break;
3515
3516			case CRYPTO_ALG_TYPE_AHASH:
3517				err = crypto_register_ahash(
3518						&t_alg->algt.alg.hash);
3519				alg = &t_alg->algt.alg.hash.halg.base;
3520				break;
3521			}
3522			if (err) {
3523				dev_err(dev, "%s alg registration failed\n",
3524					alg->cra_driver_name);
3525				devm_kfree(dev, t_alg);
3526			} else
3527				list_add_tail(&t_alg->entry, &priv->alg_list);
3528		}
3529	}
3530	if (!list_empty(&priv->alg_list))
3531		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3532			 (char *)of_get_property(np, "compatible", NULL));
3533
3534	return 0;
3535
3536err_out:
3537	talitos_remove(ofdev);
3538
3539	return err;
3540}
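
/*
 * For reference, a device-tree node consumed by talitos_probe() has the
 * shape sketched below. The property names and compatible strings are
 * the ones parsed above; the values are illustrative only and must come
 * from the SoC reference manual:
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.1", "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <11 0x8>;
 *		fsl,num-channels = <4>;		(must be a power of 2)
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */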
3541
3542static const struct of_device_id talitos_match[] = {
3543#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3544	{
3545		.compatible = "fsl,sec1.0",
3546	},
3547#endif
3548#ifdef CONFIG_CRYPTO_DEV_TALITOS2
3549	{
3550		.compatible = "fsl,sec2.0",
3551	},
3552#endif
3553	{},
3554};
3555MODULE_DEVICE_TABLE(of, talitos_match);
3556
3557static struct platform_driver talitos_driver = {
3558	.driver = {
3559		.name = "talitos",
3560		.of_match_table = talitos_match,
3561	},
3562	.probe = talitos_probe,
3563	.remove = talitos_remove,
3564};
3565
3566module_platform_driver(talitos_driver);
3567
3568MODULE_LICENSE("GPL");
3569MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3570MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * talitos - Freescale Integrated Security Engine (SEC) device driver
   4 *
   5 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
   6 *
   7 * Scatterlist Crypto API glue code copied from files with the following:
   8 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
   9 *
  10 * Crypto algorithm registration code copied from hifn driver:
  11 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
  12 * All rights reserved.
  13 */
  14
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/mod_devicetable.h>
  18#include <linux/device.h>
  19#include <linux/interrupt.h>
  20#include <linux/crypto.h>
  21#include <linux/hw_random.h>
  22#include <linux/of_address.h>
  23#include <linux/of_irq.h>
  24#include <linux/of_platform.h>
  25#include <linux/dma-mapping.h>
  26#include <linux/io.h>
  27#include <linux/spinlock.h>
  28#include <linux/rtnetlink.h>
  29#include <linux/slab.h>
  30
  31#include <crypto/algapi.h>
  32#include <crypto/aes.h>
  33#include <crypto/internal/des.h>
  34#include <crypto/sha.h>
  35#include <crypto/md5.h>
  36#include <crypto/internal/aead.h>
  37#include <crypto/authenc.h>
  38#include <crypto/skcipher.h>
  39#include <crypto/hash.h>
  40#include <crypto/internal/hash.h>
  41#include <crypto/scatterwalk.h>
  42
  43#include "talitos.h"
  44
  45static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
  46			   unsigned int len, bool is_sec1)
  47{
  48	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
  49	if (is_sec1) {
  50		ptr->len1 = cpu_to_be16(len);
  51	} else {
  52		ptr->len = cpu_to_be16(len);
  53		ptr->eptr = upper_32_bits(dma_addr);
  54	}
  55}
  56
  57static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
  58			     struct talitos_ptr *src_ptr, bool is_sec1)
  59{
  60	dst_ptr->ptr = src_ptr->ptr;
  61	if (is_sec1) {
  62		dst_ptr->len1 = src_ptr->len1;
  63	} else {
  64		dst_ptr->len = src_ptr->len;
  65		dst_ptr->eptr = src_ptr->eptr;
  66	}
  67}
  68
  69static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
  70					   bool is_sec1)
  71{
  72	if (is_sec1)
  73		return be16_to_cpu(ptr->len1);
  74	else
  75		return be16_to_cpu(ptr->len);
  76}
  77
  78static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
  79				   bool is_sec1)
  80{
  81	if (!is_sec1)
  82		ptr->j_extent = val;
  83}
  84
  85static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
  86{
  87	if (!is_sec1)
  88		ptr->j_extent |= val;
  89}
  90
  91/*
  92 * map virtual single (contiguous) pointer to h/w descriptor pointer
  93 */
  94static void __map_single_talitos_ptr(struct device *dev,
  95				     struct talitos_ptr *ptr,
  96				     unsigned int len, void *data,
  97				     enum dma_data_direction dir,
  98				     unsigned long attrs)
  99{
 100	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
 101	struct talitos_private *priv = dev_get_drvdata(dev);
 102	bool is_sec1 = has_ftr_sec1(priv);
 103
 104	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
 105}
 106
 107static void map_single_talitos_ptr(struct device *dev,
 108				   struct talitos_ptr *ptr,
 109				   unsigned int len, void *data,
 110				   enum dma_data_direction dir)
 111{
 112	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
 113}
 114
 115static void map_single_talitos_ptr_nosync(struct device *dev,
 116					  struct talitos_ptr *ptr,
 117					  unsigned int len, void *data,
 118					  enum dma_data_direction dir)
 119{
 120	__map_single_talitos_ptr(dev, ptr, len, data, dir,
 121				 DMA_ATTR_SKIP_CPU_SYNC);
 122}
 123
 124/*
 125 * unmap bus single (contiguous) h/w descriptor pointer
 126 */
 127static void unmap_single_talitos_ptr(struct device *dev,
 128				     struct talitos_ptr *ptr,
 129				     enum dma_data_direction dir)
 130{
 131	struct talitos_private *priv = dev_get_drvdata(dev);
 132	bool is_sec1 = has_ftr_sec1(priv);
 133
 134	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
 135			 from_talitos_ptr_len(ptr, is_sec1), dir);
 136}
 137
 138static int reset_channel(struct device *dev, int ch)
 139{
 140	struct talitos_private *priv = dev_get_drvdata(dev);
 141	unsigned int timeout = TALITOS_TIMEOUT;
 142	bool is_sec1 = has_ftr_sec1(priv);
 143
 144	if (is_sec1) {
 145		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 146			  TALITOS1_CCCR_LO_RESET);
 147
 148		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
 149			TALITOS1_CCCR_LO_RESET) && --timeout)
 150			cpu_relax();
 151	} else {
 152		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
 153			  TALITOS2_CCCR_RESET);
 154
 155		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
 156			TALITOS2_CCCR_RESET) && --timeout)
 157			cpu_relax();
 158	}
 159
 160	if (timeout == 0) {
 161		dev_err(dev, "failed to reset channel %d\n", ch);
 162		return -EIO;
 163	}
 164
 165	/* set 36-bit addressing, done writeback enable and done IRQ enable */
 166	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
 167		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
 168	/* enable chaining descriptors */
 169	if (is_sec1)
 170		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 171			  TALITOS_CCCR_LO_NE);
 172
 173	/* and ICCR writeback, if available */
 174	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 175		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 176		          TALITOS_CCCR_LO_IWSE);
 177
 178	return 0;
 179}
 180
 181static int reset_device(struct device *dev)
 182{
 183	struct talitos_private *priv = dev_get_drvdata(dev);
 184	unsigned int timeout = TALITOS_TIMEOUT;
 185	bool is_sec1 = has_ftr_sec1(priv);
 186	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
 187
 188	setbits32(priv->reg + TALITOS_MCR, mcr);
 189
 190	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
 191	       && --timeout)
 192		cpu_relax();
 193
 194	if (priv->irq[1]) {
 195		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
 196		setbits32(priv->reg + TALITOS_MCR, mcr);
 197	}
 198
 199	if (timeout == 0) {
 200		dev_err(dev, "failed to reset device\n");
 201		return -EIO;
 202	}
 203
 204	return 0;
 205}
 206
 207/*
 208 * Reset and initialize the device
 209 */
 210static int init_device(struct device *dev)
 211{
 212	struct talitos_private *priv = dev_get_drvdata(dev);
 213	int ch, err;
 214	bool is_sec1 = has_ftr_sec1(priv);
 215
 216	/*
 217	 * Master reset
 218	 * errata documentation: warning: certain SEC interrupts
 219	 * are not fully cleared by writing the MCR:SWR bit,
 220	 * set bit twice to completely reset
 221	 */
 222	err = reset_device(dev);
 223	if (err)
 224		return err;
 225
 226	err = reset_device(dev);
 227	if (err)
 228		return err;
 229
 230	/* reset channels */
 231	for (ch = 0; ch < priv->num_channels; ch++) {
 232		err = reset_channel(dev, ch);
 233		if (err)
 234			return err;
 235	}
 236
 237	/* enable channel done and error interrupts */
 238	if (is_sec1) {
 239		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
 240		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
 241		/* disable parity error check in DEU (erroneous? test vect.) */
 242		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
 243	} else {
 244		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
 245		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
 246	}
 247
 248	/* disable integrity check error interrupts (use writeback instead) */
 249	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 250		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
 251		          TALITOS_MDEUICR_LO_ICE);
 252
 253	return 0;
 254}
 255
 256/**
 257 * talitos_submit - submits a descriptor to the device for processing
 258 * @dev:	the SEC device to be used
 259 * @ch:		the SEC device channel to be used
 260 * @desc:	the descriptor to be processed by the device
 261 * @callback:	whom to call when processing is complete
 262 * @context:	a handle for use by caller (optional)
 263 *
 264 * desc must contain valid dma-mapped (bus physical) address pointers.
 265 * callback must check err and feedback in descriptor header
 266 * for device processing status.
 267 */
 268static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
 269			  void (*callback)(struct device *dev,
 270					   struct talitos_desc *desc,
 271					   void *context, int error),
 272			  void *context)
 273{
 274	struct talitos_private *priv = dev_get_drvdata(dev);
 275	struct talitos_request *request;
 276	unsigned long flags;
 277	int head;
 278	bool is_sec1 = has_ftr_sec1(priv);
 279
 280	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
 281
 282	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
 283		/* h/w fifo is full */
 284		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 285		return -EAGAIN;
 286	}
 287
 288	head = priv->chan[ch].head;
 289	request = &priv->chan[ch].fifo[head];
 290
 291	/* map descriptor and save caller data */
 292	if (is_sec1) {
 293		desc->hdr1 = desc->hdr;
 294		request->dma_desc = dma_map_single(dev, &desc->hdr1,
 295						   TALITOS_DESC_SIZE,
 296						   DMA_BIDIRECTIONAL);
 297	} else {
 298		request->dma_desc = dma_map_single(dev, desc,
 299						   TALITOS_DESC_SIZE,
 300						   DMA_BIDIRECTIONAL);
 301	}
 302	request->callback = callback;
 303	request->context = context;
 304
 305	/* increment fifo head */
 306	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
 307
 308	smp_wmb();
 309	request->desc = desc;
 310
 311	/* GO! */
 312	wmb();
 313	out_be32(priv->chan[ch].reg + TALITOS_FF,
 314		 upper_32_bits(request->dma_desc));
 315	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
 316		 lower_32_bits(request->dma_desc));
 317
 318	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 319
 320	return -EINPROGRESS;
 321}
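
/*
 * A minimal, hypothetical caller of talitos_submit() (my_done() and the
 * pre-mapped descriptor are assumed for illustration, not part of this
 * driver):
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		// error holds the channel status; the DESC_HDR_DONE bits
 *		// in desc->hdr carry the device's own feedback
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, my_context);
 *	if (err != -EINPROGRESS)
 *		// -EAGAIN: channel fifo was full, caller may retry later
 *
 * Note that -EINPROGRESS is the success return: completion is always
 * asynchronous, via the callback invoked from the done tasklet.
 */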
 322
 323static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
 324{
 325	struct talitos_edesc *edesc;
 326
 327	if (!is_sec1)
 328		return request->desc->hdr;
 329
 330	if (!request->desc->next_desc)
 331		return request->desc->hdr1;
 332
 333	edesc = container_of(request->desc, struct talitos_edesc, desc);
 334
 335	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
 336}
 337
 338/*
 339 * process what was done, notify callback of error if not
 340 */
 341static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 342{
 343	struct talitos_private *priv = dev_get_drvdata(dev);
 344	struct talitos_request *request, saved_req;
 345	unsigned long flags;
 346	int tail, status;
 347	bool is_sec1 = has_ftr_sec1(priv);
 348
 349	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 350
 351	tail = priv->chan[ch].tail;
 352	while (priv->chan[ch].fifo[tail].desc) {
 353		__be32 hdr;
 354
 355		request = &priv->chan[ch].fifo[tail];
 356
 357		/* descriptors with their done bits set don't get the error */
 358		rmb();
 359		hdr = get_request_hdr(request, is_sec1);
 360
 361		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
 362			status = 0;
 363		else
 364			if (!error)
 365				break;
 366			else
 367				status = error;
 368
 369		dma_unmap_single(dev, request->dma_desc,
 370				 TALITOS_DESC_SIZE,
 371				 DMA_BIDIRECTIONAL);
 372
 373		/* copy entries so we can call callback outside lock */
 374		saved_req.desc = request->desc;
 375		saved_req.callback = request->callback;
 376		saved_req.context = request->context;
 377
 378		/* release request entry in fifo */
 379		smp_wmb();
 380		request->desc = NULL;
 381
 382		/* increment fifo tail */
 383		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
 384
 385		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 386
 387		atomic_dec(&priv->chan[ch].submit_count);
 388
 389		saved_req.callback(dev, saved_req.desc, saved_req.context,
 390				   status);
 391		/* channel may resume processing in single desc error case */
 392		if (error && !reset_ch && status == error)
 393			return;
 394		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 395		tail = priv->chan[ch].tail;
 396	}
 397
 398	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 399}
 400
 401/*
 402 * process completed requests for channels that have done status
 403 */
 404#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
 405static void talitos1_done_##name(unsigned long data)			\
 406{									\
 407	struct device *dev = (struct device *)data;			\
 408	struct talitos_private *priv = dev_get_drvdata(dev);		\
 409	unsigned long flags;						\
 410									\
 411	if (ch_done_mask & 0x10000000)					\
 412		flush_channel(dev, 0, 0, 0);			\
 413	if (ch_done_mask & 0x40000000)					\
 414		flush_channel(dev, 1, 0, 0);			\
 415	if (ch_done_mask & 0x00010000)					\
 416		flush_channel(dev, 2, 0, 0);			\
 417	if (ch_done_mask & 0x00040000)					\
 418		flush_channel(dev, 3, 0, 0);			\
 419									\
 420	/* At this point, all completed channels have been processed */	\
 421	/* Unmask done interrupts for channels completed later on. */	\
 422	spin_lock_irqsave(&priv->reg_lock, flags);			\
 423	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
 424	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
 425	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
 426}
 427
 428DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
 429DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
 430
 431#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
 432static void talitos2_done_##name(unsigned long data)			\
 433{									\
 434	struct device *dev = (struct device *)data;			\
 435	struct talitos_private *priv = dev_get_drvdata(dev);		\
 436	unsigned long flags;						\
 437									\
 438	if (ch_done_mask & 1)						\
 439		flush_channel(dev, 0, 0, 0);				\
 440	if (ch_done_mask & (1 << 2))					\
 441		flush_channel(dev, 1, 0, 0);				\
 442	if (ch_done_mask & (1 << 4))					\
 443		flush_channel(dev, 2, 0, 0);				\
 444	if (ch_done_mask & (1 << 6))					\
 445		flush_channel(dev, 3, 0, 0);				\
 446									\
 447	/* At this point, all completed channels have been processed */	\
 448	/* Unmask done interrupts for channels completed later on. */	\
 449	spin_lock_irqsave(&priv->reg_lock, flags);			\
 450	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
 451	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
 452	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
 453}
 454
 455DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
 456DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
 457DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
 458DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
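
/*
 * Each DEF_TALITOS2_DONE() line above expands the macro into a tasklet
 * body; e.g. DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE) defines
 * talitos2_done_ch0_2(), which flushes channels 0 and 2 and then
 * re-enables their done interrupts under reg_lock, matching the tasklet
 * chosen for the dual-IRQ case in talitos_probe().
 */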
 459
 460/*
 461 * locate current (offending) descriptor
 462 */
 463static u32 current_desc_hdr(struct device *dev, int ch)
 464{
 465	struct talitos_private *priv = dev_get_drvdata(dev);
 466	int tail, iter;
 467	dma_addr_t cur_desc;
 468
 469	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
 470	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
 471
 472	if (!cur_desc) {
 473		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
 474		return 0;
 475	}
 476
 477	tail = priv->chan[ch].tail;
 478
 479	iter = tail;
 480	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
 481	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
 482		iter = (iter + 1) & (priv->fifo_len - 1);
 483		if (iter == tail) {
 484			dev_err(dev, "couldn't locate current descriptor\n");
 485			return 0;
 486		}
 487	}
 488
 489	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
 490		struct talitos_edesc *edesc;
 491
 492		edesc = container_of(priv->chan[ch].fifo[iter].desc,
 493				     struct talitos_edesc, desc);
 494		return ((struct talitos_desc *)
 495			(edesc->buf + edesc->dma_len))->hdr;
 496	}
 497
 498	return priv->chan[ch].fifo[iter].desc->hdr;
 499}
 500
 501/*
 502 * user diagnostics; report root cause of error based on execution unit status
 503 */
 504static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
 505{
 506	struct talitos_private *priv = dev_get_drvdata(dev);
 507	int i;
 508
 509	if (!desc_hdr)
 510		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
 511
 512	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
 513	case DESC_HDR_SEL0_AFEU:
 514		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
 515			in_be32(priv->reg_afeu + TALITOS_EUISR),
 516			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
 517		break;
 518	case DESC_HDR_SEL0_DEU:
 519		dev_err(dev, "DEUISR 0x%08x_%08x\n",
 520			in_be32(priv->reg_deu + TALITOS_EUISR),
 521			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
 522		break;
 523	case DESC_HDR_SEL0_MDEUA:
 524	case DESC_HDR_SEL0_MDEUB:
 525		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 526			in_be32(priv->reg_mdeu + TALITOS_EUISR),
 527			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
 528		break;
 529	case DESC_HDR_SEL0_RNG:
 530		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
 531			in_be32(priv->reg_rngu + TALITOS_ISR),
 532			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
 533		break;
 534	case DESC_HDR_SEL0_PKEU:
 535		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
 536			in_be32(priv->reg_pkeu + TALITOS_EUISR),
 537			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
 538		break;
 539	case DESC_HDR_SEL0_AESU:
 540		dev_err(dev, "AESUISR 0x%08x_%08x\n",
 541			in_be32(priv->reg_aesu + TALITOS_EUISR),
 542			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
 543		break;
 544	case DESC_HDR_SEL0_CRCU:
 545		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 546			in_be32(priv->reg_crcu + TALITOS_EUISR),
 547			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
 548		break;
 549	case DESC_HDR_SEL0_KEU:
 550		dev_err(dev, "KEUISR 0x%08x_%08x\n",
  551			in_be32(priv->reg_keu + TALITOS_EUISR),
  552			in_be32(priv->reg_keu + TALITOS_EUISR_LO));
 553		break;
 554	}
 555
 556	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
 557	case DESC_HDR_SEL1_MDEUA:
 558	case DESC_HDR_SEL1_MDEUB:
 559		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 560			in_be32(priv->reg_mdeu + TALITOS_EUISR),
 561			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
 562		break;
 563	case DESC_HDR_SEL1_CRCU:
 564		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 565			in_be32(priv->reg_crcu + TALITOS_EUISR),
 566			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
 567		break;
 568	}
 569
 570	for (i = 0; i < 8; i++)
 571		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
 572			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
 573			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
 574}
 575
 576/*
 577 * recover from error interrupts
 578 */
 579static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
 580{
 581	struct talitos_private *priv = dev_get_drvdata(dev);
 582	unsigned int timeout = TALITOS_TIMEOUT;
 583	int ch, error, reset_dev = 0;
 584	u32 v_lo;
 585	bool is_sec1 = has_ftr_sec1(priv);
 586	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
 587
 588	for (ch = 0; ch < priv->num_channels; ch++) {
 589		/* skip channels without errors */
 590		if (is_sec1) {
 591			/* bits 29, 31, 17, 19 */
 592			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
 593				continue;
 594		} else {
 595			if (!(isr & (1 << (ch * 2 + 1))))
 596				continue;
 597		}
 598
 599		error = -EINVAL;
 600
 601		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
 602
 603		if (v_lo & TALITOS_CCPSR_LO_DOF) {
 604			dev_err(dev, "double fetch fifo overflow error\n");
 605			error = -EAGAIN;
 606			reset_ch = 1;
 607		}
 608		if (v_lo & TALITOS_CCPSR_LO_SOF) {
 609			/* h/w dropped descriptor */
 610			dev_err(dev, "single fetch fifo overflow error\n");
 611			error = -EAGAIN;
 612		}
 613		if (v_lo & TALITOS_CCPSR_LO_MDTE)
 614			dev_err(dev, "master data transfer error\n");
 615		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
 616			dev_err(dev, is_sec1 ? "pointer not complete error\n"
 617					     : "s/g data length zero error\n");
 618		if (v_lo & TALITOS_CCPSR_LO_FPZ)
 619			dev_err(dev, is_sec1 ? "parity error\n"
 620					     : "fetch pointer zero error\n");
 621		if (v_lo & TALITOS_CCPSR_LO_IDH)
 622			dev_err(dev, "illegal descriptor header error\n");
 623		if (v_lo & TALITOS_CCPSR_LO_IEU)
 624			dev_err(dev, is_sec1 ? "static assignment error\n"
 625					     : "invalid exec unit error\n");
 626		if (v_lo & TALITOS_CCPSR_LO_EU)
 627			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
 628		if (!is_sec1) {
 629			if (v_lo & TALITOS_CCPSR_LO_GB)
 630				dev_err(dev, "gather boundary error\n");
 631			if (v_lo & TALITOS_CCPSR_LO_GRL)
 632				dev_err(dev, "gather return/length error\n");
 633			if (v_lo & TALITOS_CCPSR_LO_SB)
 634				dev_err(dev, "scatter boundary error\n");
 635			if (v_lo & TALITOS_CCPSR_LO_SRL)
 636				dev_err(dev, "scatter return/length error\n");
 637		}
 638
 639		flush_channel(dev, ch, error, reset_ch);
 640
 641		if (reset_ch) {
 642			reset_channel(dev, ch);
 643		} else {
 644			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
 645				  TALITOS2_CCCR_CONT);
 646			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
 647			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
 648			       TALITOS2_CCCR_CONT) && --timeout)
 649				cpu_relax();
 650			if (timeout == 0) {
 651				dev_err(dev, "failed to restart channel %d\n",
 652					ch);
 653				reset_dev = 1;
 654			}
 655		}
 656	}
 657	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
 658	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
 659		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
 660			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
 661				isr, isr_lo);
 662		else
 663			dev_err(dev, "done overflow, internal time out, or "
 664				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
 665
 666		/* purge request queues */
 667		for (ch = 0; ch < priv->num_channels; ch++)
 668			flush_channel(dev, ch, -EIO, 1);
 669
 670		/* reset and reinitialize the device */
 671		init_device(dev);
 672	}
 673}
 674
 675#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
 676static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
 677{									       \
 678	struct device *dev = data;					       \
 679	struct talitos_private *priv = dev_get_drvdata(dev);		       \
 680	u32 isr, isr_lo;						       \
 681	unsigned long flags;						       \
 682									       \
 683	spin_lock_irqsave(&priv->reg_lock, flags);			       \
 684	isr = in_be32(priv->reg + TALITOS_ISR);				       \
 685	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
 686	/* Acknowledge interrupt */					       \
 687	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
 688	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
 689									       \
 690	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
 691		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 692		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
 693	}								       \
 694	else {								       \
 695		if (likely(isr & ch_done_mask)) {			       \
 696			/* mask further done interrupts. */		       \
 697			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
 698			/* done_task will unmask done interrupts at exit */    \
 699			tasklet_schedule(&priv->done_task[tlet]);	       \
 700		}							       \
 701		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 702	}								       \
 703									       \
 704	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
 705								IRQ_NONE;      \
 706}
 707
 708DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
 709
 710#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
 711static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
 712{									       \
 713	struct device *dev = data;					       \
 714	struct talitos_private *priv = dev_get_drvdata(dev);		       \
 715	u32 isr, isr_lo;						       \
 716	unsigned long flags;						       \
 717									       \
 718	spin_lock_irqsave(&priv->reg_lock, flags);			       \
 719	isr = in_be32(priv->reg + TALITOS_ISR);				       \
 720	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
 721	/* Acknowledge interrupt */					       \
 722	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
 723	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
 724									       \
 725	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
 726		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 727		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
 728	}								       \
 729	else {								       \
 730		if (likely(isr & ch_done_mask)) {			       \
 731			/* mask further done interrupts. */		       \
 732			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
 733			/* done_task will unmask done interrupts at exit */    \
 734			tasklet_schedule(&priv->done_task[tlet]);	       \
 735		}							       \
 736		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 737	}								       \
 738									       \
 739	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
 740								IRQ_NONE;      \
 741}
 742
 743DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
 744DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
 745		       0)
 746DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
 747		       1)
 748
 749/*
 750 * hwrng
 751 */
 752static int talitos_rng_data_present(struct hwrng *rng, int wait)
 753{
 754	struct device *dev = (struct device *)rng->priv;
 755	struct talitos_private *priv = dev_get_drvdata(dev);
 756	u32 ofl;
 757	int i;
 758
 759	for (i = 0; i < 20; i++) {
 760		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
 761		      TALITOS_RNGUSR_LO_OFL;
 762		if (ofl || !wait)
 763			break;
 764		udelay(10);
 765	}
 766
 767	return !!ofl;
 768}
 769
 770static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
 771{
 772	struct device *dev = (struct device *)rng->priv;
 773	struct talitos_private *priv = dev_get_drvdata(dev);
 774
  775	/* rng fifo requires 64-bit accesses; only the low word is kept */
 776	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
 777	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
 778
 779	return sizeof(u32);
 780}
 781
 782static int talitos_rng_init(struct hwrng *rng)
 783{
 784	struct device *dev = (struct device *)rng->priv;
 785	struct talitos_private *priv = dev_get_drvdata(dev);
 786	unsigned int timeout = TALITOS_TIMEOUT;
 787
 788	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
 789	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
 790		 & TALITOS_RNGUSR_LO_RD)
 791	       && --timeout)
 792		cpu_relax();
 793	if (timeout == 0) {
 794		dev_err(dev, "failed to reset rng hw\n");
 795		return -ENODEV;
 796	}
 797
 798	/* start generating */
 799	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
 800
 801	return 0;
 802}
 803
 804static int talitos_register_rng(struct device *dev)
 805{
 806	struct talitos_private *priv = dev_get_drvdata(dev);
 807	int err;
 808
  809	priv->rng.name		= dev_driver_string(dev);
  810	priv->rng.init		= talitos_rng_init;
  811	priv->rng.data_present	= talitos_rng_data_present;
  812	priv->rng.data_read	= talitos_rng_data_read;
 813	priv->rng.priv		= (unsigned long)dev;
 814
 815	err = hwrng_register(&priv->rng);
 816	if (!err)
 817		priv->rng_registered = true;
 818
 819	return err;
 820}
 821
 822static void talitos_unregister_rng(struct device *dev)
 823{
 824	struct talitos_private *priv = dev_get_drvdata(dev);
 825
 826	if (!priv->rng_registered)
 827		return;
 828
 829	hwrng_unregister(&priv->rng);
 830	priv->rng_registered = false;
 831}
 832
 833/*
 834 * crypto alg
 835 */
 836#define TALITOS_CRA_PRIORITY		3000
 837/*
  838 * Defines a priority for doing AEAD with descriptors of type
  839 * HMAC_SNOOP_NO_AFEU (HSNA) instead of type IPSEC_ESP
 840 */
 841#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
 842#ifdef CONFIG_CRYPTO_DEV_TALITOS2
 843#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
 844#else
 845#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
 846#endif
 847#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
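
/*
 * Worked out, TALITOS_MAX_KEY_SIZE above bounds the concatenated
 * authenc() key stored in talitos_ctx::key below: with SEC2+ support
 * that is SHA512_BLOCK_SIZE (128) + AES_MAX_KEY_SIZE (32) = 160 bytes
 * for a full-block HMAC-SHA512 key plus an AES-256 key; SEC1-only
 * builds cap at SHA256_BLOCK_SIZE (64) + 32 = 96 bytes.
 */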
 848
 849struct talitos_ctx {
 850	struct device *dev;
 851	int ch;
 852	__be32 desc_hdr_template;
 853	u8 key[TALITOS_MAX_KEY_SIZE];
 854	u8 iv[TALITOS_MAX_IV_LENGTH];
 855	dma_addr_t dma_key;
 856	unsigned int keylen;
 857	unsigned int enckeylen;
 858	unsigned int authkeylen;
 859};
 860
 861#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
 862#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
 863
 864struct talitos_ahash_req_ctx {
 865	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 866	unsigned int hw_context_size;
 867	u8 buf[2][HASH_MAX_BLOCK_SIZE];
 868	int buf_idx;
 869	unsigned int swinit;
 870	unsigned int first;
 871	unsigned int last;
 872	unsigned int to_hash_later;
 873	unsigned int nbuf;
 874	struct scatterlist bufsl[2];
 875	struct scatterlist *psrc;
 876};
 877
 878struct talitos_export_state {
 879	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 880	u8 buf[HASH_MAX_BLOCK_SIZE];
 881	unsigned int swinit;
 882	unsigned int first;
 883	unsigned int last;
 884	unsigned int to_hash_later;
 885	unsigned int nbuf;
 886};
 887
 888static int aead_setkey(struct crypto_aead *authenc,
 889		       const u8 *key, unsigned int keylen)
 890{
 891	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 892	struct device *dev = ctx->dev;
 893	struct crypto_authenc_keys keys;
 894
 895	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 896		goto badkey;
 897
 898	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
 899		goto badkey;
 900
 901	if (ctx->keylen)
 902		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
 903
 904	memcpy(ctx->key, keys.authkey, keys.authkeylen);
 905	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
 906
 907	ctx->keylen = keys.authkeylen + keys.enckeylen;
 908	ctx->enckeylen = keys.enckeylen;
 909	ctx->authkeylen = keys.authkeylen;
 910	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
 911				      DMA_TO_DEVICE);
 912
 913	memzero_explicit(&keys, sizeof(keys));
 914	return 0;
 915
 916badkey:
 917	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
 918	memzero_explicit(&keys, sizeof(keys));
 919	return -EINVAL;
 920}
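
/*
 * The blob handed to aead_setkey() is the generic authenc() key format
 * unpacked by crypto_authenc_extractkeys(): an rtattr header carrying
 * the encryption key length, then the authentication key, then the
 * encryption key. Keeping both keys concatenated in ctx->key (auth key
 * first) lets one DMA mapping serve the whole descriptor: ptr[0] covers
 * the first authkeylen bytes and the cipher-key pointer starts at
 * dma_key + authkeylen.
 */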
 921
 922static int aead_des3_setkey(struct crypto_aead *authenc,
 923			    const u8 *key, unsigned int keylen)
 924{
 925	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 926	struct device *dev = ctx->dev;
 927	struct crypto_authenc_keys keys;
 928	int err;
 929
 930	err = crypto_authenc_extractkeys(&keys, key, keylen);
 931	if (unlikely(err))
 932		goto badkey;
 933
 934	err = -EINVAL;
 935	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
 936		goto badkey;
 937
 938	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
 939	if (err)
 940		goto out;
 941
 942	if (ctx->keylen)
 943		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
 944
 945	memcpy(ctx->key, keys.authkey, keys.authkeylen);
 946	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
 947
 948	ctx->keylen = keys.authkeylen + keys.enckeylen;
 949	ctx->enckeylen = keys.enckeylen;
 950	ctx->authkeylen = keys.authkeylen;
 951	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
 952				      DMA_TO_DEVICE);
 953
 954out:
 955	memzero_explicit(&keys, sizeof(keys));
 956	return err;
 957
 958badkey:
 959	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
 960	goto out;
 961}
 962
 963static void talitos_sg_unmap(struct device *dev,
 964			     struct talitos_edesc *edesc,
 965			     struct scatterlist *src,
 966			     struct scatterlist *dst,
 967			     unsigned int len, unsigned int offset)
 968{
 969	struct talitos_private *priv = dev_get_drvdata(dev);
 970	bool is_sec1 = has_ftr_sec1(priv);
 971	unsigned int src_nents = edesc->src_nents ? : 1;
 972	unsigned int dst_nents = edesc->dst_nents ? : 1;
 973
 974	if (is_sec1 && dst && dst_nents > 1) {
 975		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
 976					   len, DMA_FROM_DEVICE);
 977		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
 978				     offset);
 979	}
 980	if (src != dst) {
 981		if (src_nents == 1 || !is_sec1)
 982			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 983
 984		if (dst && (dst_nents == 1 || !is_sec1))
 985			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 986	} else if (src_nents == 1 || !is_sec1) {
 987		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 988	}
 989}
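
/*
 * On SEC1 a multi-entry destination is never DMA-mapped directly:
 * the engine writes into the bounce buffer at edesc->buf, and the
 * unmap above copies the result back into the caller's scatterlist
 * with sg_pcopy_from_buffer(). SEC2+ maps the scatterlist itself and
 * skips the copy.
 */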
 990
 991static void ipsec_esp_unmap(struct device *dev,
 992			    struct talitos_edesc *edesc,
 993			    struct aead_request *areq, bool encrypt)
 994{
 995	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
 996	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
 997	unsigned int ivsize = crypto_aead_ivsize(aead);
 998	unsigned int authsize = crypto_aead_authsize(aead);
 999	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1000	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
1001	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
1002
1003	if (is_ipsec_esp)
1004		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1005					 DMA_FROM_DEVICE);
1006	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1007
1008	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1009			 cryptlen + authsize, areq->assoclen);
1010
1011	if (edesc->dma_len)
1012		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1013				 DMA_BIDIRECTIONAL);
1014
1015	if (!is_ipsec_esp) {
1016		unsigned int dst_nents = edesc->dst_nents ? : 1;
1017
1018		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1019				   areq->assoclen + cryptlen - ivsize);
1020	}
1021}
1022
1023/*
1024 * ipsec_esp descriptor callbacks
1025 */
1026static void ipsec_esp_encrypt_done(struct device *dev,
1027				   struct talitos_desc *desc, void *context,
1028				   int err)
1029{
1030	struct aead_request *areq = context;
1031	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1032	unsigned int ivsize = crypto_aead_ivsize(authenc);
1033	struct talitos_edesc *edesc;
1034
1035	edesc = container_of(desc, struct talitos_edesc, desc);
1036
1037	ipsec_esp_unmap(dev, edesc, areq, true);
1038
1039	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1040
1041	kfree(edesc);
1042
1043	aead_request_complete(areq, err);
1044}
1045
1046static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1047					  struct talitos_desc *desc,
1048					  void *context, int err)
1049{
1050	struct aead_request *req = context;
1051	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1052	unsigned int authsize = crypto_aead_authsize(authenc);
1053	struct talitos_edesc *edesc;
1054	char *oicv, *icv;
1055
1056	edesc = container_of(desc, struct talitos_edesc, desc);
1057
1058	ipsec_esp_unmap(dev, edesc, req, false);
1059
1060	if (!err) {
1061		/* auth check */
1062		oicv = edesc->buf + edesc->dma_len;
1063		icv = oicv - authsize;
1064
1065		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1066	}
1067
1068	kfree(edesc);
1069
1070	aead_request_complete(req, err);
1071}
1072
1073static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1074					  struct talitos_desc *desc,
1075					  void *context, int err)
1076{
1077	struct aead_request *req = context;
1078	struct talitos_edesc *edesc;
1079
1080	edesc = container_of(desc, struct talitos_edesc, desc);
1081
1082	ipsec_esp_unmap(dev, edesc, req, false);
1083
1084	/* check ICV auth status */
1085	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1086		     DESC_HDR_LO_ICCR1_PASS))
1087		err = -EBADMSG;
1088
1089	kfree(edesc);
1090
1091	aead_request_complete(req, err);
1092}
1093
1094/*
1095 * convert scatterlist to SEC h/w link table format
1096 * stop at cryptlen bytes
1097 */
1098static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1099				 unsigned int offset, int datalen, int elen,
1100				 struct talitos_ptr *link_tbl_ptr)
1101{
1102	int n_sg = elen ? sg_count + 1 : sg_count;
1103	int count = 0;
1104	int cryptlen = datalen + elen;
1105
1106	while (cryptlen && sg && n_sg--) {
1107		unsigned int len = sg_dma_len(sg);
1108
1109		if (offset >= len) {
1110			offset -= len;
1111			goto next;
1112		}
1113
1114		len -= offset;
1115
1116		if (len > cryptlen)
1117			len = cryptlen;
1118
1119		if (datalen > 0 && len > datalen) {
1120			to_talitos_ptr(link_tbl_ptr + count,
1121				       sg_dma_address(sg) + offset, datalen, 0);
1122			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1123			count++;
1124			len -= datalen;
1125			offset += datalen;
1126		}
1127		to_talitos_ptr(link_tbl_ptr + count,
1128			       sg_dma_address(sg) + offset, len, 0);
1129		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1130		count++;
1131		cryptlen -= len;
1132		datalen -= len;
1133		offset = 0;
1134
1135next:
1136		sg = sg_next(sg);
1137	}
1138
1139	/* tag end of link table */
1140	if (count > 0)
1141		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1142				       DESC_PTR_LNKTBL_RET, 0);
1143
1144	return count;
1145}
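
/*
 * Example of the conversion above (a sketch with made-up bus
 * addresses): a two-entry scatterlist of 1500 and 100 bytes, offset 0,
 * elen 0 and datalen 1600 yields
 *
 *	link_tbl[0] = { .ptr = 0x1000, .len = 1500, .j_extent = 0 }
 *	link_tbl[1] = { .ptr = 0x3000, .len = 100,
 *			.j_extent = DESC_PTR_LNKTBL_RET }
 *
 * one entry per segment, with the RETURN bit tagging the table's end.
 * A non-zero elen adds the extent bytes (e.g. a trailing ICV) to the
 * byte budget so the table runs past datalen.
 */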
1146
1147static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1148			      unsigned int len, struct talitos_edesc *edesc,
1149			      struct talitos_ptr *ptr, int sg_count,
1150			      unsigned int offset, int tbl_off, int elen,
1151			      bool force)
1152{
1153	struct talitos_private *priv = dev_get_drvdata(dev);
1154	bool is_sec1 = has_ftr_sec1(priv);
1155
1156	if (!src) {
1157		to_talitos_ptr(ptr, 0, 0, is_sec1);
1158		return 1;
1159	}
1160	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1161	if (sg_count == 1 && !force) {
1162		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1163		return sg_count;
1164	}
1165	if (is_sec1) {
1166		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1167		return sg_count;
1168	}
1169	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1170					 &edesc->link_tbl[tbl_off]);
1171	if (sg_count == 1 && !force) {
1172		/* Only one segment now, so no link tbl needed */
1173		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1174		return sg_count;
1175	}
1176	to_talitos_ptr(ptr, edesc->dma_link_tbl +
1177			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1178	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1179
1180	return sg_count;
1181}
1182
1183static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1184			  unsigned int len, struct talitos_edesc *edesc,
1185			  struct talitos_ptr *ptr, int sg_count,
1186			  unsigned int offset, int tbl_off)
1187{
1188	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1189				  tbl_off, 0, false);
1190}
1191
1192/*
1193 * fill in and submit ipsec_esp descriptor
1194 */
1195static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1196		     bool encrypt,
1197		     void (*callback)(struct device *dev,
1198				      struct talitos_desc *desc,
1199				      void *context, int error))
1200{
1201	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1202	unsigned int authsize = crypto_aead_authsize(aead);
1203	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1204	struct device *dev = ctx->dev;
1205	struct talitos_desc *desc = &edesc->desc;
1206	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1207	unsigned int ivsize = crypto_aead_ivsize(aead);
1208	int tbl_off = 0;
1209	int sg_count, ret;
1210	int elen = 0;
1211	bool sync_needed = false;
1212	struct talitos_private *priv = dev_get_drvdata(dev);
1213	bool is_sec1 = has_ftr_sec1(priv);
1214	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1215	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1216	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1217	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
1218
1219	/* hmac key */
1220	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1221
1222	sg_count = edesc->src_nents ?: 1;
1223	if (is_sec1 && sg_count > 1)
1224		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1225				  areq->assoclen + cryptlen);
1226	else
1227		sg_count = dma_map_sg(dev, areq->src, sg_count,
1228				      (areq->src == areq->dst) ?
1229				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1230
1231	/* hmac data */
1232	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1233			     &desc->ptr[1], sg_count, 0, tbl_off);
1234
1235	if (ret > 1) {
1236		tbl_off += ret;
1237		sync_needed = true;
1238	}
1239
1240	/* cipher iv */
1241	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1242
1243	/* cipher key */
1244	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1245		       ctx->enckeylen, is_sec1);
1246
1247	/*
1248	 * cipher in
1249	 * map and adjust cipher len to aead request cryptlen.
1250	 * extent is bytes of HMAC appended to the ciphertext,
1251	 * typically 12 for ipsec
1252	 */
1253	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1254		elen = authsize;
1255
1256	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1257				 sg_count, areq->assoclen, tbl_off, elen,
1258				 false);
1259
1260	if (ret > 1) {
1261		tbl_off += ret;
1262		sync_needed = true;
1263	}
1264
1265	/* cipher out */
1266	if (areq->src != areq->dst) {
1267		sg_count = edesc->dst_nents ? : 1;
1268		if (!is_sec1 || sg_count == 1)
1269			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1270	}
1271
1272	if (is_ipsec_esp && encrypt)
1273		elen = authsize;
1274	else
1275		elen = 0;
1276	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1277				 sg_count, areq->assoclen, tbl_off, elen,
1278				 is_ipsec_esp && !encrypt);
1279	tbl_off += ret;
1280
1281	if (!encrypt && is_ipsec_esp) {
1282		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1283
1284		/* Add an entry to the link table for ICV data */
1285		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1286		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1287
1288		/* icv data follows link tables */
1289		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1290		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1291		sync_needed = true;
1292	} else if (!encrypt) {
1293		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1294		sync_needed = true;
1295	} else if (!is_ipsec_esp) {
1296		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1297			       sg_count, areq->assoclen + cryptlen, tbl_off);
1298	}
1299
1300	/* iv out */
1301	if (is_ipsec_esp)
1302		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1303				       DMA_FROM_DEVICE);
1304
1305	if (sync_needed)
1306		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1307					   edesc->dma_len,
1308					   DMA_BIDIRECTIONAL);
1309
1310	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1311	if (ret != -EINPROGRESS) {
1312		ipsec_esp_unmap(dev, edesc, areq, encrypt);
1313		kfree(edesc);
1314	}
1315	return ret;
1316}
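
/*
 * For reference, the descriptor pointer map filled in above:
 *
 *	ptr[0]	HMAC key (first ctx->authkeylen bytes of ctx->key)
 *	ptr[1]	HMAC-only data (the associated data)
 *	ptr[2]	cipher IV for IPSEC_ESP types, else cipher key
 *	ptr[3]	cipher key for IPSEC_ESP types, else cipher IV
 *	ptr[4]	cipher in (cryptlen bytes, ICV as extent when checking)
 *	ptr[5]	cipher out (ICV appended via link table on ESP decrypt)
 *	ptr[6]	IV out for IPSEC_ESP; otherwise ICV in/out
 */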
1317
1318/*
1319 * allocate and map the extended descriptor
1320 */
1321static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1322						 struct scatterlist *src,
1323						 struct scatterlist *dst,
1324						 u8 *iv,
1325						 unsigned int assoclen,
1326						 unsigned int cryptlen,
1327						 unsigned int authsize,
1328						 unsigned int ivsize,
1329						 int icv_stashing,
1330						 u32 cryptoflags,
1331						 bool encrypt)
1332{
1333	struct talitos_edesc *edesc;
1334	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1335	dma_addr_t iv_dma = 0;
1336	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1337		      GFP_ATOMIC;
1338	struct talitos_private *priv = dev_get_drvdata(dev);
1339	bool is_sec1 = has_ftr_sec1(priv);
1340	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1341
1342	if (cryptlen + authsize > max_len) {
1343		dev_err(dev, "length exceeds h/w max limit\n");
1344		return ERR_PTR(-EINVAL);
1345	}
1346
1347	if (!dst || dst == src) {
1348		src_len = assoclen + cryptlen + authsize;
1349		src_nents = sg_nents_for_len(src, src_len);
1350		if (src_nents < 0) {
1351			dev_err(dev, "Invalid number of src SG.\n");
1352			return ERR_PTR(-EINVAL);
1353		}
1354		src_nents = (src_nents == 1) ? 0 : src_nents;
1355		dst_nents = dst ? src_nents : 0;
1356		dst_len = 0;
1357	} else { /* dst && dst != src */
1358		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1359		src_nents = sg_nents_for_len(src, src_len);
1360		if (src_nents < 0) {
1361			dev_err(dev, "Invalid number of src SG.\n");
1362			return ERR_PTR(-EINVAL);
1363		}
1364		src_nents = (src_nents == 1) ? 0 : src_nents;
1365		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1366		dst_nents = sg_nents_for_len(dst, dst_len);
1367		if (dst_nents < 0) {
1368			dev_err(dev, "Invalid number of dst SG.\n");
1369			return ERR_PTR(-EINVAL);
1370		}
1371		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1372	}
1373
1374	/*
1375	 * allocate space for base edesc plus the link tables,
1376	 * allowing for two separate entries for AD and generated ICV (+ 2),
1377	 * and space for two sets of ICVs (stashed and generated)
1378	 */
1379	alloc_len = sizeof(struct talitos_edesc);
1380	if (src_nents || dst_nents || !encrypt) {
1381		if (is_sec1)
1382			dma_len = (src_nents ? src_len : 0) +
1383				  (dst_nents ? dst_len : 0) + authsize;
1384		else
1385			dma_len = (src_nents + dst_nents + 2) *
1386				  sizeof(struct talitos_ptr) + authsize;
1387		alloc_len += dma_len;
1388	} else {
1389		dma_len = 0;
1390	}
1391	alloc_len += icv_stashing ? authsize : 0;
1392
1393	/* if it's an ahash, add space for a second desc next to the first one */
1394	if (is_sec1 && !dst)
1395		alloc_len += sizeof(struct talitos_desc);
1396	alloc_len += ivsize;
1397
1398	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1399	if (!edesc)
1400		return ERR_PTR(-ENOMEM);
1401	if (ivsize) {
1402		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1403		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1404	}
1405	memset(&edesc->desc, 0, sizeof(edesc->desc));
1406
1407	edesc->src_nents = src_nents;
1408	edesc->dst_nents = dst_nents;
1409	edesc->iv_dma = iv_dma;
1410	edesc->dma_len = dma_len;
1411	if (dma_len)
1412		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1413						     edesc->dma_len,
1414						     DMA_BIDIRECTIONAL);
1415
1416	return edesc;
1417}
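
/*
 * Layout of the single allocation made above, back to back (each part
 * only present when the corresponding condition held):
 *
 *	struct talitos_edesc		always
 *	link tables / bounce buffer	dma_len bytes, incl. ICV space
 *	stashed ICV			authsize bytes, if icv_stashing
 *	second descriptor		SEC1 ahash only
 *	IV copy				ivsize bytes, mapped to iv_dma
 */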
1418
1419static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1420					      int icv_stashing, bool encrypt)
1421{
1422	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1423	unsigned int authsize = crypto_aead_authsize(authenc);
1424	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1425	unsigned int ivsize = crypto_aead_ivsize(authenc);
1426	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1427
1428	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1429				   iv, areq->assoclen, cryptlen,
1430				   authsize, ivsize, icv_stashing,
1431				   areq->base.flags, encrypt);
1432}
1433
1434static int aead_encrypt(struct aead_request *req)
1435{
1436	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1437	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1438	struct talitos_edesc *edesc;
1439
1440	/* allocate extended descriptor */
1441	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1442	if (IS_ERR(edesc))
1443		return PTR_ERR(edesc);
1444
1445	/* set encrypt */
1446	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1447
1448	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1449}
1450
1451static int aead_decrypt(struct aead_request *req)
1452{
1453	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1454	unsigned int authsize = crypto_aead_authsize(authenc);
1455	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1456	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1457	struct talitos_edesc *edesc;
1458	void *icvdata;
1459
1460	/* allocate extended descriptor */
1461	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1462	if (IS_ERR(edesc))
1463		return PTR_ERR(edesc);
1464
1465	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1466	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1467	    ((!edesc->src_nents && !edesc->dst_nents) ||
1468	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1469
1470		/* decrypt and check the ICV */
1471		edesc->desc.hdr = ctx->desc_hdr_template |
1472				  DESC_HDR_DIR_INBOUND |
1473				  DESC_HDR_MODE1_MDEU_CICV;
1474
1475		/* reset integrity check result bits */
1476
1477		return ipsec_esp(edesc, req, false,
1478				 ipsec_esp_decrypt_hwauth_done);
1479	}
1480
1481	/* Have to check the ICV with software */
1482	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1483
1484	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1485	icvdata = edesc->buf + edesc->dma_len;
1486
1487	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1488			   req->assoclen + req->cryptlen - authsize);
1489
1490	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1491}
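
/*
 * Two decrypt flavours are chosen above: with an IPSEC_ESP descriptor
 * on hardware that has TALITOS_FTR_HW_AUTH_CHECK (and an s/g layout the
 * engine can handle), the device compares the ICV itself and
 * ipsec_esp_decrypt_hwauth_done() merely inspects the ICCR status bits;
 * otherwise the incoming ICV is stashed past the link tables and
 * ipsec_esp_decrypt_swauth_done() falls back to a crypto_memneq()
 * compare in software.
 */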
1492
1493static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1494			     const u8 *key, unsigned int keylen)
1495{
1496	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1497	struct device *dev = ctx->dev;
1498
1499	if (ctx->keylen)
1500		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1501
1502	memcpy(&ctx->key, key, keylen);
1503	ctx->keylen = keylen;
1504
1505	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1506
1507	return 0;
1508}
1509
1510static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1511				 const u8 *key, unsigned int keylen)
1512{
1513	return verify_ablkcipher_des_key(cipher, key) ?:
1514	       ablkcipher_setkey(cipher, key, keylen);
1515}
1516
1517static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1518				  const u8 *key, unsigned int keylen)
1519{
1520	return verify_ablkcipher_des3_key(cipher, key) ?:
1521	       ablkcipher_setkey(cipher, key, keylen);
1522}
1523
1524static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1525				  const u8 *key, unsigned int keylen)
1526{
1527	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1528	    keylen == AES_KEYSIZE_256)
1529		return ablkcipher_setkey(cipher, key, keylen);
1530
1531	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1532
1533	return -EINVAL;
1534}
1535
1536static void common_nonsnoop_unmap(struct device *dev,
1537				  struct talitos_edesc *edesc,
1538				  struct ablkcipher_request *areq)
1539{
1540	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1541
1542	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1543	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1544
1545	if (edesc->dma_len)
1546		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1547				 DMA_BIDIRECTIONAL);
1548}
1549
1550static void ablkcipher_done(struct device *dev,
1551			    struct talitos_desc *desc, void *context,
1552			    int err)
1553{
1554	struct ablkcipher_request *areq = context;
1555	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1556	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1557	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1558	struct talitos_edesc *edesc;
1559
1560	edesc = container_of(desc, struct talitos_edesc, desc);
1561
1562	common_nonsnoop_unmap(dev, edesc, areq);
1563	memcpy(areq->info, ctx->iv, ivsize);
1564
1565	kfree(edesc);
1566
1567	areq->base.complete(&areq->base, err);
1568}
1569
1570static int common_nonsnoop(struct talitos_edesc *edesc,
1571			   struct ablkcipher_request *areq,
1572			   void (*callback) (struct device *dev,
1573					     struct talitos_desc *desc,
1574					     void *context, int error))
1575{
1576	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1577	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1578	struct device *dev = ctx->dev;
1579	struct talitos_desc *desc = &edesc->desc;
1580	unsigned int cryptlen = areq->nbytes;
1581	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1582	int sg_count, ret;
1583	bool sync_needed = false;
1584	struct talitos_private *priv = dev_get_drvdata(dev);
1585	bool is_sec1 = has_ftr_sec1(priv);
1586
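	/*
	 * Fill the descriptor's seven pointer dwords for a plain
	 * (non-snooping) cipher operation: ptr[1] = IV in, ptr[2] = key,
	 * ptr[3] = data in, ptr[4] = data out, ptr[5] = IV out; ptr[0]
	 * and ptr[6] stay empty.
	 */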
1587	/* first DWORD empty */
1588
1589	/* cipher iv */
1590	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1591
1592	/* cipher key */
1593	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1594
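	/*
	 * For SEC1, a multi-entry source list is linearized into the bounce
	 * buffer rather than DMA-mapped (SEC1 seemingly lacks usable
	 * scatter/gather for this path).
	 */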
1595	sg_count = edesc->src_nents ?: 1;
1596	if (is_sec1 && sg_count > 1)
1597		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1598				  cryptlen);
1599	else
1600		sg_count = dma_map_sg(dev, areq->src, sg_count,
1601				      (areq->src == areq->dst) ?
1602				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1603	/*
1604	 * cipher in
1605	 */
1606	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1607				  &desc->ptr[3], sg_count, 0, 0);
1608	if (sg_count > 1)
1609		sync_needed = true;
1610
1611	/* cipher out */
1612	if (areq->src != areq->dst) {
1613		sg_count = edesc->dst_nents ? : 1;
1614		if (!is_sec1 || sg_count == 1)
1615			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1616	}
1617
1618	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1619			     sg_count, 0, (edesc->src_nents + 1));
1620	if (ret > 1)
1621		sync_needed = true;
1622
1623	/* iv out */
1624	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1625			       DMA_FROM_DEVICE);
1626
1627	/* last DWORD empty */
1628
1629	if (sync_needed)
1630		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1631					   edesc->dma_len, DMA_BIDIRECTIONAL);
1632
1633	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1634	if (ret != -EINPROGRESS) {
1635		common_nonsnoop_unmap(dev, edesc, areq);
1636		kfree(edesc);
1637	}
1638	return ret;
1639}
1640
1641static struct talitos_edesc *
1642ablkcipher_edesc_alloc(struct ablkcipher_request *areq, bool encrypt)
1643{
1644	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1645	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1646	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1647
1648	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1649				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
1650				   areq->base.flags, encrypt);
1651}
1652
1653static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1654{
1655	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1656	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1657	struct talitos_edesc *edesc;
1658	unsigned int blocksize =
1659			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1660
1661	if (!areq->nbytes)
1662		return 0;
1663
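	/*
	 * ECB/CBC require whole blocks; note that ctr(aes) registers with
	 * cra_blocksize = 1, so this check never rejects CTR requests.
	 */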
1664	if (areq->nbytes % blocksize)
1665		return -EINVAL;
1666
1667	/* allocate extended descriptor */
1668	edesc = ablkcipher_edesc_alloc(areq, true);
1669	if (IS_ERR(edesc))
1670		return PTR_ERR(edesc);
1671
1672	/* set encrypt */
1673	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1674
1675	return common_nonsnoop(edesc, areq, ablkcipher_done);
1676}
1677
1678static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1679{
1680	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1681	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1682	struct talitos_edesc *edesc;
1683	unsigned int blocksize =
1684			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1685
1686	if (!areq->nbytes)
1687		return 0;
1688
1689	if (areq->nbytes % blocksize)
1690		return -EINVAL;
1691
1692	/* allocate extended descriptor */
1693	edesc = ablkcipher_edesc_alloc(areq, false);
1694	if (IS_ERR(edesc))
1695		return PTR_ERR(edesc);
1696
1697	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1698
1699	return common_nonsnoop(edesc, areq, ablkcipher_done);
1700}
1701
1702static void common_nonsnoop_hash_unmap(struct device *dev,
1703				       struct talitos_edesc *edesc,
1704				       struct ahash_request *areq)
1705{
1706	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1707	struct talitos_private *priv = dev_get_drvdata(dev);
1708	bool is_sec1 = has_ftr_sec1(priv);
1709	struct talitos_desc *desc = &edesc->desc;
1710	struct talitos_desc *desc2 = (struct talitos_desc *)
1711				     (edesc->buf + edesc->dma_len);
1712
1713	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1714	if (desc->next_desc &&
1715	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
1716		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1717
1718	if (req_ctx->psrc)
1719		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1720
1721	/* When using hashctx-in, must unmap it. */
1722	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1723		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1724					 DMA_TO_DEVICE);
1725	else if (desc->next_desc)
1726		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1727					 DMA_TO_DEVICE);
1728
1729	if (is_sec1 && req_ctx->nbuf)
1730		unmap_single_talitos_ptr(dev, &desc->ptr[3],
1731					 DMA_TO_DEVICE);
1732
1733	if (edesc->dma_len)
1734		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1735				 DMA_BIDIRECTIONAL);
1736
1737	if (edesc->desc.next_desc)
1738		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1739				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1740}
1741
1742static void ahash_done(struct device *dev,
1743		       struct talitos_desc *desc, void *context,
1744		       int err)
1745{
1746	struct ahash_request *areq = context;
1747	struct talitos_edesc *edesc =
1748		 container_of(desc, struct talitos_edesc, desc);
1749	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1750
1751	if (!req_ctx->last && req_ctx->to_hash_later) {
1752		/* Position any partial block for next update/final/finup */
1753		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1754		req_ctx->nbuf = req_ctx->to_hash_later;
1755	}
1756	common_nonsnoop_hash_unmap(dev, edesc, areq);
1757
1758	kfree(edesc);
1759
1760	areq->base.complete(&areq->base, err);
1761}
1762
1763/*
1764 * SEC1 doesn't like hashing a zero-sized message, so we do the padding
1765 * ourselves and submit a pre-padded block
1766 */
1767static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1768			       struct talitos_edesc *edesc,
1769			       struct talitos_ptr *ptr)
1770{
1771	static u8 padded_hash[64] = {
1772		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1773		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1774		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1775		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1776	};
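	/*
	 * padded_hash is a pre-padded empty message: the 0x80 end-of-message
	 * marker followed by zeros, with the trailing 64-bit length field
	 * (also zero) already in place, which is why MDEU padding is
	 * disabled below.
	 */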
1777
1778	pr_err_once("Bug in SEC1, padding ourselves\n");
1779	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1780	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1781			       (char *)padded_hash, DMA_TO_DEVICE);
1782}
1783
1784static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1785				struct ahash_request *areq, unsigned int length,
1786				void (*callback) (struct device *dev,
1787						  struct talitos_desc *desc,
1788						  void *context, int error))
1789{
1790	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1791	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1792	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1793	struct device *dev = ctx->dev;
1794	struct talitos_desc *desc = &edesc->desc;
1795	int ret;
1796	bool sync_needed = false;
1797	struct talitos_private *priv = dev_get_drvdata(dev);
1798	bool is_sec1 = has_ftr_sec1(priv);
1799	int sg_count;
1800
1801	/* first DWORD empty */
1802
1803	/* hash context in */
1804	if (!req_ctx->first || req_ctx->swinit) {
1805		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1806					      req_ctx->hw_context_size,
1807					      req_ctx->hw_context,
1808					      DMA_TO_DEVICE);
1809		req_ctx->swinit = 0;
1810	}
1811	/* Indicate next op is not the first. */
1812	req_ctx->first = 0;
1813
1814	/* HMAC key */
1815	if (ctx->keylen)
1816		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1817			       is_sec1);
1818
1819	if (is_sec1 && req_ctx->nbuf)
1820		length -= req_ctx->nbuf;
1821
1822	sg_count = edesc->src_nents ?: 1;
1823	if (is_sec1 && sg_count > 1)
1824		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1825	else if (length)
1826		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1827				      DMA_TO_DEVICE);
1828	/*
1829	 * data in
1830	 */
1831	if (is_sec1 && req_ctx->nbuf) {
1832		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1833				       req_ctx->buf[req_ctx->buf_idx],
1834				       DMA_TO_DEVICE);
1835	} else {
1836		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1837					  &desc->ptr[3], sg_count, 0, 0);
1838		if (sg_count > 1)
1839			sync_needed = true;
1840	}
1841
1842	/* fifth DWORD empty */
1843
1844	/* hash/HMAC out -or- hash context out */
1845	if (req_ctx->last)
1846		map_single_talitos_ptr(dev, &desc->ptr[5],
1847				       crypto_ahash_digestsize(tfm),
1848				       areq->result, DMA_FROM_DEVICE);
1849	else
1850		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1851					      req_ctx->hw_context_size,
1852					      req_ctx->hw_context,
1853					      DMA_FROM_DEVICE);
1854
1855	/* last DWORD empty */
1856
1857	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1858		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1859
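	/*
	 * SEC1 apparently cannot hash previously buffered bytes and new
	 * scatterlist data in a single descriptor, so build a second
	 * descriptor (desc2) for the scatterlist data and chain it via
	 * next_desc, suppressing DONE notification on the first one.
	 */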
1860	if (is_sec1 && req_ctx->nbuf && length) {
1861		struct talitos_desc *desc2 = (struct talitos_desc *)
1862					     (edesc->buf + edesc->dma_len);
1863		dma_addr_t next_desc;
1864
1865		memset(desc2, 0, sizeof(*desc2));
1866		desc2->hdr = desc->hdr;
1867		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1868		desc2->hdr1 = desc2->hdr;
1869		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1870		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1871		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1872
1873		if (desc->ptr[1].ptr)
1874			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1875					 is_sec1);
1876		else
1877			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1878						      req_ctx->hw_context_size,
1879						      req_ctx->hw_context,
1880						      DMA_TO_DEVICE);
1881		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1882		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1883					  &desc2->ptr[3], sg_count, 0, 0);
1884		if (sg_count > 1)
1885			sync_needed = true;
1886		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1887		if (req_ctx->last)
1888			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1889						      req_ctx->hw_context_size,
1890						      req_ctx->hw_context,
1891						      DMA_FROM_DEVICE);
1892
1893		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1894					   DMA_BIDIRECTIONAL);
1895		desc->next_desc = cpu_to_be32(next_desc);
1896	}
1897
1898	if (sync_needed)
1899		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1900					   edesc->dma_len, DMA_BIDIRECTIONAL);
1901
1902	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1903	if (ret != -EINPROGRESS) {
1904		common_nonsnoop_hash_unmap(dev, edesc, areq);
1905		kfree(edesc);
1906	}
1907	return ret;
1908}
1909
1910static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1911					       unsigned int nbytes)
1912{
1913	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1914	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1915	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1916	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1917	bool is_sec1 = has_ftr_sec1(priv);
1918
1919	if (is_sec1)
1920		nbytes -= req_ctx->nbuf;
1921
1922	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1923				   nbytes, 0, 0, 0, areq->base.flags, false);
1924}
1925
1926static int ahash_init(struct ahash_request *areq)
1927{
1928	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1929	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1930	struct device *dev = ctx->dev;
1931	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1932	unsigned int size;
1933	dma_addr_t dma;
1934
1935	/* Initialize the context */
1936	req_ctx->buf_idx = 0;
1937	req_ctx->nbuf = 0;
1938	req_ctx->first = 1; /* first indicates h/w must init its context */
1939	req_ctx->swinit = 0; /* assume h/w init of context */
1940	size =	(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1941			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1942			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1943	req_ctx->hw_context_size = size;
1944
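	/*
	 * This map/unmap round-trip appears to serve only to write back
	 * any cached hw_context lines, so that the later _nosync mappings
	 * of the context stay coherent with device accesses.
	 */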
1945	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1946			     DMA_TO_DEVICE);
1947	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1948
1949	return 0;
1950}
1951
1952/*
1953 * on h/w without explicit sha224 support, we initialize h/w context
1954 * manually with sha224 constants, and tell it to run sha256.
1955 */
1956static int ahash_init_sha224_swinit(struct ahash_request *areq)
1957{
1958	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1959
1960	req_ctx->hw_context[0] = SHA224_H0;
1961	req_ctx->hw_context[1] = SHA224_H1;
1962	req_ctx->hw_context[2] = SHA224_H2;
1963	req_ctx->hw_context[3] = SHA224_H3;
1964	req_ctx->hw_context[4] = SHA224_H4;
1965	req_ctx->hw_context[5] = SHA224_H5;
1966	req_ctx->hw_context[6] = SHA224_H6;
1967	req_ctx->hw_context[7] = SHA224_H7;
1968
1969	/* init 64-bit count */
1970	req_ctx->hw_context[8] = 0;
1971	req_ctx->hw_context[9] = 0;
1972
1973	ahash_init(areq);
1974	req_ctx->swinit = 1; /* prevent h/w initializing context with sha256 */
1975
1976	return 0;
1977}
1978
1979static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1980{
1981	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1982	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1983	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1984	struct talitos_edesc *edesc;
1985	unsigned int blocksize =
1986			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1987	unsigned int nbytes_to_hash;
1988	unsigned int to_hash_later;
1989	unsigned int nsg;
1990	int nents;
1991	struct device *dev = ctx->dev;
1992	struct talitos_private *priv = dev_get_drvdata(dev);
1993	bool is_sec1 = has_ftr_sec1(priv);
1994	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1995
1996	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1997		/* Buffer up to one whole block */
1998		nents = sg_nents_for_len(areq->src, nbytes);
1999		if (nents < 0) {
2000			dev_err(ctx->dev, "Invalid number of src SG.\n");
2001			return nents;
2002		}
2003		sg_copy_to_buffer(areq->src, nents,
2004				  ctx_buf + req_ctx->nbuf, nbytes);
2005		req_ctx->nbuf += nbytes;
2006		return 0;
2007	}
2008
2009	/* At least (blocksize + 1) bytes are available to hash */
2010	nbytes_to_hash = nbytes + req_ctx->nbuf;
2011	to_hash_later = nbytes_to_hash & (blocksize - 1);
2012
2013	if (req_ctx->last)
2014		to_hash_later = 0;
2015	else if (to_hash_later)
2016		/* There is a partial block. Hash the full block(s) now */
2017		nbytes_to_hash -= to_hash_later;
2018	else {
2019		/* Keep one block buffered */
2020		nbytes_to_hash -= blocksize;
2021		to_hash_later = blocksize;
2022	}
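	/*
	 * Worked example (assuming blocksize = 64): with 10 buffered bytes
	 * and nbytes = 120, nbytes_to_hash = 130 and to_hash_later = 2, so
	 * 128 bytes (two full blocks) are hashed now and 2 bytes are
	 * carried over to the next update.
	 */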
2023
2024	/* Chain in any previously buffered data */
2025	if (!is_sec1 && req_ctx->nbuf) {
2026		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2027		sg_init_table(req_ctx->bufsl, nsg);
2028		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2029		if (nsg > 1)
2030			sg_chain(req_ctx->bufsl, 2, areq->src);
2031		req_ctx->psrc = req_ctx->bufsl;
2032	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2033		int offset;
2034
2035		if (nbytes_to_hash > blocksize)
2036			offset = blocksize - req_ctx->nbuf;
2037		else
2038			offset = nbytes_to_hash - req_ctx->nbuf;
2039		nents = sg_nents_for_len(areq->src, offset);
2040		if (nents < 0) {
2041			dev_err(ctx->dev, "Invalid number of src SG.\n");
2042			return nents;
2043		}
2044		sg_copy_to_buffer(areq->src, nents,
2045				  ctx_buf + req_ctx->nbuf, offset);
2046		req_ctx->nbuf += offset;
2047		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2048						 offset);
2049	} else
2050		req_ctx->psrc = areq->src;
2051
2052	if (to_hash_later) {
2053		nents = sg_nents_for_len(areq->src, nbytes);
2054		if (nents < 0) {
2055			dev_err(ctx->dev, "Invalid number of src SG.\n");
2056			return nents;
2057		}
2058		sg_pcopy_to_buffer(areq->src, nents,
2059				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2060				   to_hash_later,
2061				   nbytes - to_hash_later);
2062	}
2063	req_ctx->to_hash_later = to_hash_later;
2064
2065	/* Allocate extended descriptor */
2066	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2067	if (IS_ERR(edesc))
2068		return PTR_ERR(edesc);
2069
2070	edesc->desc.hdr = ctx->desc_hdr_template;
2071
2072	/* On last one, request SEC to pad; otherwise continue */
2073	if (req_ctx->last)
2074		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2075	else
2076		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2077
2078	/* request SEC to INIT hash. */
2079	if (req_ctx->first && !req_ctx->swinit)
2080		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2081
2082	/* When the tfm context has a keylen, it's an HMAC.
2083	 * A first or last (i.e. not a middle) descriptor must request HMAC.
2084	 */
2085	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2086		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2087
2088	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2089}
2090
2091static int ahash_update(struct ahash_request *areq)
2092{
2093	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2094
2095	req_ctx->last = 0;
2096
2097	return ahash_process_req(areq, areq->nbytes);
2098}
2099
2100static int ahash_final(struct ahash_request *areq)
2101{
2102	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2103
2104	req_ctx->last = 1;
2105
2106	return ahash_process_req(areq, 0);
2107}
2108
2109static int ahash_finup(struct ahash_request *areq)
2110{
2111	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2112
2113	req_ctx->last = 1;
2114
2115	return ahash_process_req(areq, areq->nbytes);
2116}
2117
2118static int ahash_digest(struct ahash_request *areq)
2119{
2120	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2121	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2122
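	/* digest is simply init plus one final pass over the whole request */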
2123	ahash->init(areq);
2124	req_ctx->last = 1;
2125
2126	return ahash_process_req(areq, areq->nbytes);
2127}
2128
2129static int ahash_export(struct ahash_request *areq, void *out)
2130{
2131	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2132	struct talitos_export_state *export = out;
2133	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2134	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2135	struct device *dev = ctx->dev;
2136	dma_addr_t dma;
2137
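	/*
	 * The map/unmap pair below presumably invalidates stale cached
	 * lines so the CPU reads the context the h/w last wrote.
	 */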
2138	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2139			     DMA_FROM_DEVICE);
2140	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2141
2142	memcpy(export->hw_context, req_ctx->hw_context,
2143	       req_ctx->hw_context_size);
2144	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2145	export->swinit = req_ctx->swinit;
2146	export->first = req_ctx->first;
2147	export->last = req_ctx->last;
2148	export->to_hash_later = req_ctx->to_hash_later;
2149	export->nbuf = req_ctx->nbuf;
2150
2151	return 0;
2152}
2153
2154static int ahash_import(struct ahash_request *areq, const void *in)
2155{
2156	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2157	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2158	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2159	struct device *dev = ctx->dev;
2160	const struct talitos_export_state *export = in;
2161	unsigned int size;
2162	dma_addr_t dma;
2163
2164	memset(req_ctx, 0, sizeof(*req_ctx));
2165	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2166			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2167			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2168	req_ctx->hw_context_size = size;
2169	memcpy(req_ctx->hw_context, export->hw_context, size);
2170	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2171	req_ctx->swinit = export->swinit;
2172	req_ctx->first = export->first;
2173	req_ctx->last = export->last;
2174	req_ctx->to_hash_later = export->to_hash_later;
2175	req_ctx->nbuf = export->nbuf;
2176
2177	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2178			     DMA_TO_DEVICE);
2179	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2180
2181	return 0;
2182}
2183
2184static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2185		   u8 *hash)
2186{
2187	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2188
2189	struct scatterlist sg[1];
2190	struct ahash_request *req;
2191	struct crypto_wait wait;
2192	int ret;
2193
2194	crypto_init_wait(&wait);
2195
2196	req = ahash_request_alloc(tfm, GFP_KERNEL);
2197	if (!req)
2198		return -ENOMEM;
2199
2200	/* Keep tfm keylen == 0 during hash of the long key */
2201	ctx->keylen = 0;
2202	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2203				   crypto_req_done, &wait);
2204
2205	sg_init_one(&sg[0], key, keylen);
2206
2207	ahash_request_set_crypt(req, sg, hash, keylen);
2208	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2209
2210	ahash_request_free(req);
2211
2212	return ret;
2213}
2214
2215static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2216			unsigned int keylen)
2217{
2218	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2219	struct device *dev = ctx->dev;
2220	unsigned int blocksize =
2221			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2222	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2223	unsigned int keysize = keylen;
2224	u8 hash[SHA512_DIGEST_SIZE];
2225	int ret;
2226
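	/*
	 * Per HMAC (RFC 2104), a key longer than the block size is first
	 * replaced by its digest.
	 */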
2227	if (keylen <= blocksize)
2228		memcpy(ctx->key, key, keysize);
2229	else {
2230		/* Must get the hash of the long key */
2231		ret = keyhash(tfm, key, keylen, hash);
2232
2233		if (ret) {
2234			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2235			return -EINVAL;
2236		}
2237
2238		keysize = digestsize;
2239		memcpy(ctx->key, hash, digestsize);
2240	}
2241
2242	if (ctx->keylen)
2243		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2244
2245	ctx->keylen = keysize;
2246	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2247
2248	return 0;
2249}
2250
2251
2252struct talitos_alg_template {
2253	u32 type;
2254	u32 priority;
2255	union {
2256		struct crypto_alg crypto;
2257		struct ahash_alg hash;
2258		struct aead_alg aead;
2259	} alg;
2260	__be32 desc_hdr_template;
2261};
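/*
 * One template per algorithm to register: .type selects which member of
 * the .alg union is valid, and desc_hdr_template preselects the execution
 * unit(s) and mode bits for every descriptor built for that algorithm.
 */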
2262
2263static struct talitos_alg_template driver_algs[] = {
2264	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2265	{	.type = CRYPTO_ALG_TYPE_AEAD,
2266		.alg.aead = {
2267			.base = {
2268				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2269				.cra_driver_name = "authenc-hmac-sha1-"
2270						   "cbc-aes-talitos",
2271				.cra_blocksize = AES_BLOCK_SIZE,
2272				.cra_flags = CRYPTO_ALG_ASYNC,
2273			},
2274			.ivsize = AES_BLOCK_SIZE,
2275			.maxauthsize = SHA1_DIGEST_SIZE,
2276		},
2277		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2278			             DESC_HDR_SEL0_AESU |
2279		                     DESC_HDR_MODE0_AESU_CBC |
2280		                     DESC_HDR_SEL1_MDEUA |
2281		                     DESC_HDR_MODE1_MDEU_INIT |
2282		                     DESC_HDR_MODE1_MDEU_PAD |
2283		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2284	},
2285	{	.type = CRYPTO_ALG_TYPE_AEAD,
2286		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2287		.alg.aead = {
2288			.base = {
2289				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2290				.cra_driver_name = "authenc-hmac-sha1-"
2291						   "cbc-aes-talitos-hsna",
2292				.cra_blocksize = AES_BLOCK_SIZE,
2293				.cra_flags = CRYPTO_ALG_ASYNC,
2294			},
2295			.ivsize = AES_BLOCK_SIZE,
2296			.maxauthsize = SHA1_DIGEST_SIZE,
2297		},
2298		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2299				     DESC_HDR_SEL0_AESU |
2300				     DESC_HDR_MODE0_AESU_CBC |
2301				     DESC_HDR_SEL1_MDEUA |
2302				     DESC_HDR_MODE1_MDEU_INIT |
2303				     DESC_HDR_MODE1_MDEU_PAD |
2304				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2305	},
2306	{	.type = CRYPTO_ALG_TYPE_AEAD,
2307		.alg.aead = {
2308			.base = {
2309				.cra_name = "authenc(hmac(sha1),"
2310					    "cbc(des3_ede))",
2311				.cra_driver_name = "authenc-hmac-sha1-"
2312						   "cbc-3des-talitos",
2313				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2314				.cra_flags = CRYPTO_ALG_ASYNC,
2315			},
2316			.ivsize = DES3_EDE_BLOCK_SIZE,
2317			.maxauthsize = SHA1_DIGEST_SIZE,
2318			.setkey = aead_des3_setkey,
2319		},
2320		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2321			             DESC_HDR_SEL0_DEU |
2322		                     DESC_HDR_MODE0_DEU_CBC |
2323		                     DESC_HDR_MODE0_DEU_3DES |
2324		                     DESC_HDR_SEL1_MDEUA |
2325		                     DESC_HDR_MODE1_MDEU_INIT |
2326		                     DESC_HDR_MODE1_MDEU_PAD |
2327		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2328	},
2329	{	.type = CRYPTO_ALG_TYPE_AEAD,
2330		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2331		.alg.aead = {
2332			.base = {
2333				.cra_name = "authenc(hmac(sha1),"
2334					    "cbc(des3_ede))",
2335				.cra_driver_name = "authenc-hmac-sha1-"
2336						   "cbc-3des-talitos-hsna",
2337				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2338				.cra_flags = CRYPTO_ALG_ASYNC,
2339			},
2340			.ivsize = DES3_EDE_BLOCK_SIZE,
2341			.maxauthsize = SHA1_DIGEST_SIZE,
2342			.setkey = aead_des3_setkey,
2343		},
2344		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2345				     DESC_HDR_SEL0_DEU |
2346				     DESC_HDR_MODE0_DEU_CBC |
2347				     DESC_HDR_MODE0_DEU_3DES |
2348				     DESC_HDR_SEL1_MDEUA |
2349				     DESC_HDR_MODE1_MDEU_INIT |
2350				     DESC_HDR_MODE1_MDEU_PAD |
2351				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2352	},
2353	{       .type = CRYPTO_ALG_TYPE_AEAD,
2354		.alg.aead = {
2355			.base = {
2356				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2357				.cra_driver_name = "authenc-hmac-sha224-"
2358						   "cbc-aes-talitos",
2359				.cra_blocksize = AES_BLOCK_SIZE,
2360				.cra_flags = CRYPTO_ALG_ASYNC,
2361			},
2362			.ivsize = AES_BLOCK_SIZE,
2363			.maxauthsize = SHA224_DIGEST_SIZE,
2364		},
2365		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2366				     DESC_HDR_SEL0_AESU |
2367				     DESC_HDR_MODE0_AESU_CBC |
2368				     DESC_HDR_SEL1_MDEUA |
2369				     DESC_HDR_MODE1_MDEU_INIT |
2370				     DESC_HDR_MODE1_MDEU_PAD |
2371				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2372	},
2373	{       .type = CRYPTO_ALG_TYPE_AEAD,
2374		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2375		.alg.aead = {
2376			.base = {
2377				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2378				.cra_driver_name = "authenc-hmac-sha224-"
2379						   "cbc-aes-talitos-hsna",
2380				.cra_blocksize = AES_BLOCK_SIZE,
2381				.cra_flags = CRYPTO_ALG_ASYNC,
2382			},
2383			.ivsize = AES_BLOCK_SIZE,
2384			.maxauthsize = SHA224_DIGEST_SIZE,
2385		},
2386		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2387				     DESC_HDR_SEL0_AESU |
2388				     DESC_HDR_MODE0_AESU_CBC |
2389				     DESC_HDR_SEL1_MDEUA |
2390				     DESC_HDR_MODE1_MDEU_INIT |
2391				     DESC_HDR_MODE1_MDEU_PAD |
2392				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2393	},
2394	{	.type = CRYPTO_ALG_TYPE_AEAD,
2395		.alg.aead = {
2396			.base = {
2397				.cra_name = "authenc(hmac(sha224),"
2398					    "cbc(des3_ede))",
2399				.cra_driver_name = "authenc-hmac-sha224-"
2400						   "cbc-3des-talitos",
2401				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2402				.cra_flags = CRYPTO_ALG_ASYNC,
2403			},
2404			.ivsize = DES3_EDE_BLOCK_SIZE,
2405			.maxauthsize = SHA224_DIGEST_SIZE,
2406			.setkey = aead_des3_setkey,
2407		},
2408		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2409			             DESC_HDR_SEL0_DEU |
2410		                     DESC_HDR_MODE0_DEU_CBC |
2411		                     DESC_HDR_MODE0_DEU_3DES |
2412		                     DESC_HDR_SEL1_MDEUA |
2413		                     DESC_HDR_MODE1_MDEU_INIT |
2414		                     DESC_HDR_MODE1_MDEU_PAD |
2415		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2416	},
2417	{	.type = CRYPTO_ALG_TYPE_AEAD,
2418		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2419		.alg.aead = {
2420			.base = {
2421				.cra_name = "authenc(hmac(sha224),"
2422					    "cbc(des3_ede))",
2423				.cra_driver_name = "authenc-hmac-sha224-"
2424						   "cbc-3des-talitos-hsna",
2425				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2426				.cra_flags = CRYPTO_ALG_ASYNC,
2427			},
2428			.ivsize = DES3_EDE_BLOCK_SIZE,
2429			.maxauthsize = SHA224_DIGEST_SIZE,
2430			.setkey = aead_des3_setkey,
2431		},
2432		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2433				     DESC_HDR_SEL0_DEU |
2434				     DESC_HDR_MODE0_DEU_CBC |
2435				     DESC_HDR_MODE0_DEU_3DES |
2436				     DESC_HDR_SEL1_MDEUA |
2437				     DESC_HDR_MODE1_MDEU_INIT |
2438				     DESC_HDR_MODE1_MDEU_PAD |
2439				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2440	},
2441	{	.type = CRYPTO_ALG_TYPE_AEAD,
2442		.alg.aead = {
2443			.base = {
2444				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2445				.cra_driver_name = "authenc-hmac-sha256-"
2446						   "cbc-aes-talitos",
2447				.cra_blocksize = AES_BLOCK_SIZE,
2448				.cra_flags = CRYPTO_ALG_ASYNC,
2449			},
2450			.ivsize = AES_BLOCK_SIZE,
2451			.maxauthsize = SHA256_DIGEST_SIZE,
2452		},
2453		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2454			             DESC_HDR_SEL0_AESU |
2455		                     DESC_HDR_MODE0_AESU_CBC |
2456		                     DESC_HDR_SEL1_MDEUA |
2457		                     DESC_HDR_MODE1_MDEU_INIT |
2458		                     DESC_HDR_MODE1_MDEU_PAD |
2459		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2460	},
2461	{	.type = CRYPTO_ALG_TYPE_AEAD,
2462		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2463		.alg.aead = {
2464			.base = {
2465				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2466				.cra_driver_name = "authenc-hmac-sha256-"
2467						   "cbc-aes-talitos-hsna",
2468				.cra_blocksize = AES_BLOCK_SIZE,
2469				.cra_flags = CRYPTO_ALG_ASYNC,
2470			},
2471			.ivsize = AES_BLOCK_SIZE,
2472			.maxauthsize = SHA256_DIGEST_SIZE,
2473		},
2474		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2475				     DESC_HDR_SEL0_AESU |
2476				     DESC_HDR_MODE0_AESU_CBC |
2477				     DESC_HDR_SEL1_MDEUA |
2478				     DESC_HDR_MODE1_MDEU_INIT |
2479				     DESC_HDR_MODE1_MDEU_PAD |
2480				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2481	},
2482	{	.type = CRYPTO_ALG_TYPE_AEAD,
2483		.alg.aead = {
2484			.base = {
2485				.cra_name = "authenc(hmac(sha256),"
2486					    "cbc(des3_ede))",
2487				.cra_driver_name = "authenc-hmac-sha256-"
2488						   "cbc-3des-talitos",
2489				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2490				.cra_flags = CRYPTO_ALG_ASYNC,
2491			},
2492			.ivsize = DES3_EDE_BLOCK_SIZE,
2493			.maxauthsize = SHA256_DIGEST_SIZE,
2494			.setkey = aead_des3_setkey,
2495		},
2496		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2497			             DESC_HDR_SEL0_DEU |
2498		                     DESC_HDR_MODE0_DEU_CBC |
2499		                     DESC_HDR_MODE0_DEU_3DES |
2500		                     DESC_HDR_SEL1_MDEUA |
2501		                     DESC_HDR_MODE1_MDEU_INIT |
2502		                     DESC_HDR_MODE1_MDEU_PAD |
2503		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2504	},
2505	{	.type = CRYPTO_ALG_TYPE_AEAD,
2506		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2507		.alg.aead = {
2508			.base = {
2509				.cra_name = "authenc(hmac(sha256),"
2510					    "cbc(des3_ede))",
2511				.cra_driver_name = "authenc-hmac-sha256-"
2512						   "cbc-3des-talitos-hsna",
2513				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2514				.cra_flags = CRYPTO_ALG_ASYNC,
2515			},
2516			.ivsize = DES3_EDE_BLOCK_SIZE,
2517			.maxauthsize = SHA256_DIGEST_SIZE,
2518			.setkey = aead_des3_setkey,
2519		},
2520		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2521				     DESC_HDR_SEL0_DEU |
2522				     DESC_HDR_MODE0_DEU_CBC |
2523				     DESC_HDR_MODE0_DEU_3DES |
2524				     DESC_HDR_SEL1_MDEUA |
2525				     DESC_HDR_MODE1_MDEU_INIT |
2526				     DESC_HDR_MODE1_MDEU_PAD |
2527				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2528	},
2529	{	.type = CRYPTO_ALG_TYPE_AEAD,
2530		.alg.aead = {
2531			.base = {
2532				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2533				.cra_driver_name = "authenc-hmac-sha384-"
2534						   "cbc-aes-talitos",
2535				.cra_blocksize = AES_BLOCK_SIZE,
2536				.cra_flags = CRYPTO_ALG_ASYNC,
2537			},
2538			.ivsize = AES_BLOCK_SIZE,
2539			.maxauthsize = SHA384_DIGEST_SIZE,
2540		},
2541		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2542			             DESC_HDR_SEL0_AESU |
2543		                     DESC_HDR_MODE0_AESU_CBC |
2544		                     DESC_HDR_SEL1_MDEUB |
2545		                     DESC_HDR_MODE1_MDEU_INIT |
2546		                     DESC_HDR_MODE1_MDEU_PAD |
2547		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2548	},
2549	{	.type = CRYPTO_ALG_TYPE_AEAD,
2550		.alg.aead = {
2551			.base = {
2552				.cra_name = "authenc(hmac(sha384),"
2553					    "cbc(des3_ede))",
2554				.cra_driver_name = "authenc-hmac-sha384-"
2555						   "cbc-3des-talitos",
2556				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2557				.cra_flags = CRYPTO_ALG_ASYNC,
2558			},
2559			.ivsize = DES3_EDE_BLOCK_SIZE,
2560			.maxauthsize = SHA384_DIGEST_SIZE,
2561			.setkey = aead_des3_setkey,
2562		},
2563		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2564			             DESC_HDR_SEL0_DEU |
2565		                     DESC_HDR_MODE0_DEU_CBC |
2566		                     DESC_HDR_MODE0_DEU_3DES |
2567		                     DESC_HDR_SEL1_MDEUB |
2568		                     DESC_HDR_MODE1_MDEU_INIT |
2569		                     DESC_HDR_MODE1_MDEU_PAD |
2570		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2571	},
2572	{	.type = CRYPTO_ALG_TYPE_AEAD,
2573		.alg.aead = {
2574			.base = {
2575				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2576				.cra_driver_name = "authenc-hmac-sha512-"
2577						   "cbc-aes-talitos",
2578				.cra_blocksize = AES_BLOCK_SIZE,
2579				.cra_flags = CRYPTO_ALG_ASYNC,
2580			},
2581			.ivsize = AES_BLOCK_SIZE,
2582			.maxauthsize = SHA512_DIGEST_SIZE,
2583		},
2584		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2585			             DESC_HDR_SEL0_AESU |
2586		                     DESC_HDR_MODE0_AESU_CBC |
2587		                     DESC_HDR_SEL1_MDEUB |
2588		                     DESC_HDR_MODE1_MDEU_INIT |
2589		                     DESC_HDR_MODE1_MDEU_PAD |
2590		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2591	},
2592	{	.type = CRYPTO_ALG_TYPE_AEAD,
2593		.alg.aead = {
2594			.base = {
2595				.cra_name = "authenc(hmac(sha512),"
2596					    "cbc(des3_ede))",
2597				.cra_driver_name = "authenc-hmac-sha512-"
2598						   "cbc-3des-talitos",
2599				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2600				.cra_flags = CRYPTO_ALG_ASYNC,
2601			},
2602			.ivsize = DES3_EDE_BLOCK_SIZE,
2603			.maxauthsize = SHA512_DIGEST_SIZE,
2604			.setkey = aead_des3_setkey,
2605		},
2606		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2607			             DESC_HDR_SEL0_DEU |
2608		                     DESC_HDR_MODE0_DEU_CBC |
2609		                     DESC_HDR_MODE0_DEU_3DES |
2610		                     DESC_HDR_SEL1_MDEUB |
2611		                     DESC_HDR_MODE1_MDEU_INIT |
2612		                     DESC_HDR_MODE1_MDEU_PAD |
2613		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2614	},
2615	{	.type = CRYPTO_ALG_TYPE_AEAD,
2616		.alg.aead = {
2617			.base = {
2618				.cra_name = "authenc(hmac(md5),cbc(aes))",
2619				.cra_driver_name = "authenc-hmac-md5-"
2620						   "cbc-aes-talitos",
2621				.cra_blocksize = AES_BLOCK_SIZE,
2622				.cra_flags = CRYPTO_ALG_ASYNC,
2623			},
2624			.ivsize = AES_BLOCK_SIZE,
2625			.maxauthsize = MD5_DIGEST_SIZE,
2626		},
2627		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2628			             DESC_HDR_SEL0_AESU |
2629		                     DESC_HDR_MODE0_AESU_CBC |
2630		                     DESC_HDR_SEL1_MDEUA |
2631		                     DESC_HDR_MODE1_MDEU_INIT |
2632		                     DESC_HDR_MODE1_MDEU_PAD |
2633		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2634	},
2635	{	.type = CRYPTO_ALG_TYPE_AEAD,
2636		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2637		.alg.aead = {
2638			.base = {
2639				.cra_name = "authenc(hmac(md5),cbc(aes))",
2640				.cra_driver_name = "authenc-hmac-md5-"
2641						   "cbc-aes-talitos-hsna",
2642				.cra_blocksize = AES_BLOCK_SIZE,
2643				.cra_flags = CRYPTO_ALG_ASYNC,
2644			},
2645			.ivsize = AES_BLOCK_SIZE,
2646			.maxauthsize = MD5_DIGEST_SIZE,
2647		},
2648		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2649				     DESC_HDR_SEL0_AESU |
2650				     DESC_HDR_MODE0_AESU_CBC |
2651				     DESC_HDR_SEL1_MDEUA |
2652				     DESC_HDR_MODE1_MDEU_INIT |
2653				     DESC_HDR_MODE1_MDEU_PAD |
2654				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2655	},
2656	{	.type = CRYPTO_ALG_TYPE_AEAD,
2657		.alg.aead = {
2658			.base = {
2659				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2660				.cra_driver_name = "authenc-hmac-md5-"
2661						   "cbc-3des-talitos",
2662				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2663				.cra_flags = CRYPTO_ALG_ASYNC,
2664			},
2665			.ivsize = DES3_EDE_BLOCK_SIZE,
2666			.maxauthsize = MD5_DIGEST_SIZE,
2667			.setkey = aead_des3_setkey,
2668		},
2669		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2670			             DESC_HDR_SEL0_DEU |
2671		                     DESC_HDR_MODE0_DEU_CBC |
2672		                     DESC_HDR_MODE0_DEU_3DES |
2673		                     DESC_HDR_SEL1_MDEUA |
2674		                     DESC_HDR_MODE1_MDEU_INIT |
2675		                     DESC_HDR_MODE1_MDEU_PAD |
2676		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2677	},
2678	{	.type = CRYPTO_ALG_TYPE_AEAD,
2679		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2680		.alg.aead = {
2681			.base = {
2682				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2683				.cra_driver_name = "authenc-hmac-md5-"
2684						   "cbc-3des-talitos-hsna",
2685				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2686				.cra_flags = CRYPTO_ALG_ASYNC,
2687			},
2688			.ivsize = DES3_EDE_BLOCK_SIZE,
2689			.maxauthsize = MD5_DIGEST_SIZE,
2690			.setkey = aead_des3_setkey,
2691		},
2692		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2693				     DESC_HDR_SEL0_DEU |
2694				     DESC_HDR_MODE0_DEU_CBC |
2695				     DESC_HDR_MODE0_DEU_3DES |
2696				     DESC_HDR_SEL1_MDEUA |
2697				     DESC_HDR_MODE1_MDEU_INIT |
2698				     DESC_HDR_MODE1_MDEU_PAD |
2699				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2700	},
2701	/* ABLKCIPHER algorithms. */
2702	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2703		.alg.crypto = {
2704			.cra_name = "ecb(aes)",
2705			.cra_driver_name = "ecb-aes-talitos",
2706			.cra_blocksize = AES_BLOCK_SIZE,
2707			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2708				     CRYPTO_ALG_ASYNC,
2709			.cra_ablkcipher = {
2710				.min_keysize = AES_MIN_KEY_SIZE,
2711				.max_keysize = AES_MAX_KEY_SIZE,
2712				.setkey = ablkcipher_aes_setkey,
2713			}
2714		},
2715		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2716				     DESC_HDR_SEL0_AESU,
2717	},
2718	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2719		.alg.crypto = {
2720			.cra_name = "cbc(aes)",
2721			.cra_driver_name = "cbc-aes-talitos",
2722			.cra_blocksize = AES_BLOCK_SIZE,
2723			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2724				     CRYPTO_ALG_ASYNC,
2725			.cra_ablkcipher = {
2726				.min_keysize = AES_MIN_KEY_SIZE,
2727				.max_keysize = AES_MAX_KEY_SIZE,
2728				.ivsize = AES_BLOCK_SIZE,
2729				.setkey = ablkcipher_aes_setkey,
2730			}
2731		},
2732		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2733				     DESC_HDR_SEL0_AESU |
2734				     DESC_HDR_MODE0_AESU_CBC,
2735	},
2736	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2737		.alg.crypto = {
2738			.cra_name = "ctr(aes)",
2739			.cra_driver_name = "ctr-aes-talitos",
2740			.cra_blocksize = 1,
2741			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2742				     CRYPTO_ALG_ASYNC,
2743			.cra_ablkcipher = {
2744				.min_keysize = AES_MIN_KEY_SIZE,
2745				.max_keysize = AES_MAX_KEY_SIZE,
2746				.ivsize = AES_BLOCK_SIZE,
2747				.setkey = ablkcipher_aes_setkey,
2748			}
2749		},
2750		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2751				     DESC_HDR_SEL0_AESU |
2752				     DESC_HDR_MODE0_AESU_CTR,
2753	},
2754	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2755		.alg.crypto = {
2756			.cra_name = "ecb(des)",
2757			.cra_driver_name = "ecb-des-talitos",
2758			.cra_blocksize = DES_BLOCK_SIZE,
2759			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2760				     CRYPTO_ALG_ASYNC,
2761			.cra_ablkcipher = {
2762				.min_keysize = DES_KEY_SIZE,
2763				.max_keysize = DES_KEY_SIZE,
2764				.setkey = ablkcipher_des_setkey,
2765			}
2766		},
2767		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2768				     DESC_HDR_SEL0_DEU,
2769	},
2770	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2771		.alg.crypto = {
2772			.cra_name = "cbc(des)",
2773			.cra_driver_name = "cbc-des-talitos",
2774			.cra_blocksize = DES_BLOCK_SIZE,
2775			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2776				     CRYPTO_ALG_ASYNC,
2777			.cra_ablkcipher = {
2778				.min_keysize = DES_KEY_SIZE,
2779				.max_keysize = DES_KEY_SIZE,
2780				.ivsize = DES_BLOCK_SIZE,
2781				.setkey = ablkcipher_des_setkey,
2782			}
2783		},
2784		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2785				     DESC_HDR_SEL0_DEU |
2786				     DESC_HDR_MODE0_DEU_CBC,
2787	},
2788	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2789		.alg.crypto = {
2790			.cra_name = "ecb(des3_ede)",
2791			.cra_driver_name = "ecb-3des-talitos",
2792			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2793			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2794				     CRYPTO_ALG_ASYNC,
2795			.cra_ablkcipher = {
2796				.min_keysize = DES3_EDE_KEY_SIZE,
2797				.max_keysize = DES3_EDE_KEY_SIZE,
2798				.setkey = ablkcipher_des3_setkey,
2799			}
2800		},
2801		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2802				     DESC_HDR_SEL0_DEU |
2803				     DESC_HDR_MODE0_DEU_3DES,
2804	},
2805	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2806		.alg.crypto = {
2807			.cra_name = "cbc(des3_ede)",
2808			.cra_driver_name = "cbc-3des-talitos",
2809			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2810			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2811				     CRYPTO_ALG_ASYNC,
2812			.cra_ablkcipher = {
2813				.min_keysize = DES3_EDE_KEY_SIZE,
2814				.max_keysize = DES3_EDE_KEY_SIZE,
2815				.ivsize = DES3_EDE_BLOCK_SIZE,
2816				.setkey = ablkcipher_des3_setkey,
2817			}
2818		},
2819		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2820			             DESC_HDR_SEL0_DEU |
2821		                     DESC_HDR_MODE0_DEU_CBC |
2822		                     DESC_HDR_MODE0_DEU_3DES,
2823	},
2824	/* AHASH algorithms. */
2825	{	.type = CRYPTO_ALG_TYPE_AHASH,
2826		.alg.hash = {
2827			.halg.digestsize = MD5_DIGEST_SIZE,
2828			.halg.statesize = sizeof(struct talitos_export_state),
2829			.halg.base = {
2830				.cra_name = "md5",
2831				.cra_driver_name = "md5-talitos",
2832				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2833				.cra_flags = CRYPTO_ALG_ASYNC,
2834			}
2835		},
2836		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2837				     DESC_HDR_SEL0_MDEUA |
2838				     DESC_HDR_MODE0_MDEU_MD5,
2839	},
2840	{	.type = CRYPTO_ALG_TYPE_AHASH,
2841		.alg.hash = {
2842			.halg.digestsize = SHA1_DIGEST_SIZE,
2843			.halg.statesize = sizeof(struct talitos_export_state),
2844			.halg.base = {
2845				.cra_name = "sha1",
2846				.cra_driver_name = "sha1-talitos",
2847				.cra_blocksize = SHA1_BLOCK_SIZE,
2848				.cra_flags = CRYPTO_ALG_ASYNC,
2849			}
2850		},
2851		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2852				     DESC_HDR_SEL0_MDEUA |
2853				     DESC_HDR_MODE0_MDEU_SHA1,
2854	},
2855	{	.type = CRYPTO_ALG_TYPE_AHASH,
2856		.alg.hash = {
2857			.halg.digestsize = SHA224_DIGEST_SIZE,
2858			.halg.statesize = sizeof(struct talitos_export_state),
2859			.halg.base = {
2860				.cra_name = "sha224",
2861				.cra_driver_name = "sha224-talitos",
2862				.cra_blocksize = SHA224_BLOCK_SIZE,
2863				.cra_flags = CRYPTO_ALG_ASYNC,
2864			}
2865		},
2866		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2867				     DESC_HDR_SEL0_MDEUA |
2868				     DESC_HDR_MODE0_MDEU_SHA224,
2869	},
2870	{	.type = CRYPTO_ALG_TYPE_AHASH,
2871		.alg.hash = {
2872			.halg.digestsize = SHA256_DIGEST_SIZE,
2873			.halg.statesize = sizeof(struct talitos_export_state),
2874			.halg.base = {
2875				.cra_name = "sha256",
2876				.cra_driver_name = "sha256-talitos",
2877				.cra_blocksize = SHA256_BLOCK_SIZE,
2878				.cra_flags = CRYPTO_ALG_ASYNC,
2879			}
2880		},
2881		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2882				     DESC_HDR_SEL0_MDEUA |
2883				     DESC_HDR_MODE0_MDEU_SHA256,
2884	},
2885	{	.type = CRYPTO_ALG_TYPE_AHASH,
2886		.alg.hash = {
2887			.halg.digestsize = SHA384_DIGEST_SIZE,
2888			.halg.statesize = sizeof(struct talitos_export_state),
2889			.halg.base = {
2890				.cra_name = "sha384",
2891				.cra_driver_name = "sha384-talitos",
2892				.cra_blocksize = SHA384_BLOCK_SIZE,
2893				.cra_flags = CRYPTO_ALG_ASYNC,
2894			}
2895		},
2896		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2897				     DESC_HDR_SEL0_MDEUB |
2898				     DESC_HDR_MODE0_MDEUB_SHA384,
2899	},
2900	{	.type = CRYPTO_ALG_TYPE_AHASH,
2901		.alg.hash = {
2902			.halg.digestsize = SHA512_DIGEST_SIZE,
2903			.halg.statesize = sizeof(struct talitos_export_state),
2904			.halg.base = {
2905				.cra_name = "sha512",
2906				.cra_driver_name = "sha512-talitos",
2907				.cra_blocksize = SHA512_BLOCK_SIZE,
2908				.cra_flags = CRYPTO_ALG_ASYNC,
2909			}
2910		},
2911		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2912				     DESC_HDR_SEL0_MDEUB |
2913				     DESC_HDR_MODE0_MDEUB_SHA512,
2914	},
2915	{	.type = CRYPTO_ALG_TYPE_AHASH,
2916		.alg.hash = {
2917			.halg.digestsize = MD5_DIGEST_SIZE,
2918			.halg.statesize = sizeof(struct talitos_export_state),
2919			.halg.base = {
2920				.cra_name = "hmac(md5)",
2921				.cra_driver_name = "hmac-md5-talitos",
2922				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2923				.cra_flags = CRYPTO_ALG_ASYNC,
2924			}
2925		},
2926		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2927				     DESC_HDR_SEL0_MDEUA |
2928				     DESC_HDR_MODE0_MDEU_MD5,
2929	},
2930	{	.type = CRYPTO_ALG_TYPE_AHASH,
2931		.alg.hash = {
2932			.halg.digestsize = SHA1_DIGEST_SIZE,
2933			.halg.statesize = sizeof(struct talitos_export_state),
2934			.halg.base = {
2935				.cra_name = "hmac(sha1)",
2936				.cra_driver_name = "hmac-sha1-talitos",
2937				.cra_blocksize = SHA1_BLOCK_SIZE,
2938				.cra_flags = CRYPTO_ALG_ASYNC,
2939			}
2940		},
2941		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2942				     DESC_HDR_SEL0_MDEUA |
2943				     DESC_HDR_MODE0_MDEU_SHA1,
2944	},
2945	{	.type = CRYPTO_ALG_TYPE_AHASH,
2946		.alg.hash = {
2947			.halg.digestsize = SHA224_DIGEST_SIZE,
2948			.halg.statesize = sizeof(struct talitos_export_state),
2949			.halg.base = {
2950				.cra_name = "hmac(sha224)",
2951				.cra_driver_name = "hmac-sha224-talitos",
2952				.cra_blocksize = SHA224_BLOCK_SIZE,
2953				.cra_flags = CRYPTO_ALG_ASYNC,
2954			}
2955		},
2956		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2957				     DESC_HDR_SEL0_MDEUA |
2958				     DESC_HDR_MODE0_MDEU_SHA224,
2959	},
2960	{	.type = CRYPTO_ALG_TYPE_AHASH,
2961		.alg.hash = {
2962			.halg.digestsize = SHA256_DIGEST_SIZE,
2963			.halg.statesize = sizeof(struct talitos_export_state),
2964			.halg.base = {
2965				.cra_name = "hmac(sha256)",
2966				.cra_driver_name = "hmac-sha256-talitos",
2967				.cra_blocksize = SHA256_BLOCK_SIZE,
2968				.cra_flags = CRYPTO_ALG_ASYNC,
2969			}
2970		},
2971		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2972				     DESC_HDR_SEL0_MDEUA |
2973				     DESC_HDR_MODE0_MDEU_SHA256,
2974	},
2975	{	.type = CRYPTO_ALG_TYPE_AHASH,
2976		.alg.hash = {
2977			.halg.digestsize = SHA384_DIGEST_SIZE,
2978			.halg.statesize = sizeof(struct talitos_export_state),
2979			.halg.base = {
2980				.cra_name = "hmac(sha384)",
2981				.cra_driver_name = "hmac-sha384-talitos",
2982				.cra_blocksize = SHA384_BLOCK_SIZE,
2983				.cra_flags = CRYPTO_ALG_ASYNC,
2984			}
2985		},
2986		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2987				     DESC_HDR_SEL0_MDEUB |
2988				     DESC_HDR_MODE0_MDEUB_SHA384,
2989	},
2990	{	.type = CRYPTO_ALG_TYPE_AHASH,
2991		.alg.hash = {
2992			.halg.digestsize = SHA512_DIGEST_SIZE,
2993			.halg.statesize = sizeof(struct talitos_export_state),
2994			.halg.base = {
2995				.cra_name = "hmac(sha512)",
2996				.cra_driver_name = "hmac-sha512-talitos",
2997				.cra_blocksize = SHA512_BLOCK_SIZE,
2998				.cra_flags = CRYPTO_ALG_ASYNC,
2999			}
3000		},
3001		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3002				     DESC_HDR_SEL0_MDEUB |
3003				     DESC_HDR_MODE0_MDEUB_SHA512,
3004	}
3005};
3006
3007struct talitos_crypto_alg {
3008	struct list_head entry;
3009	struct device *dev;
3010	struct talitos_alg_template algt;
3011};
3012
3013static int talitos_init_common(struct talitos_ctx *ctx,
3014			       struct talitos_crypto_alg *talitos_alg)
3015{
3016	struct talitos_private *priv;
3017
3018	/* update context with ptr to dev */
3019	ctx->dev = talitos_alg->dev;
3020
3021	/* assign SEC channel to tfm in round-robin fashion */
3022	priv = dev_get_drvdata(ctx->dev);
3023	ctx->ch = atomic_inc_return(&priv->last_chan) &
3024		  (priv->num_channels - 1);
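	/*
	 * The AND above is a cheap modulo; it assumes num_channels is a
	 * power of two, which holds for the supported SEC parts.
	 */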
3025
3026	/* copy descriptor header template value */
3027	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3028
3029	/* select done notification */
3030	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3031
3032	return 0;
3033}
3034
3035static int talitos_cra_init(struct crypto_tfm *tfm)
3036{
3037	struct crypto_alg *alg = tfm->__crt_alg;
3038	struct talitos_crypto_alg *talitos_alg;
3039	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3040
3041	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3042		talitos_alg = container_of(__crypto_ahash_alg(alg),
3043					   struct talitos_crypto_alg,
3044					   algt.alg.hash);
3045	else
3046		talitos_alg = container_of(alg, struct talitos_crypto_alg,
3047					   algt.alg.crypto);
3048
3049	return talitos_init_common(ctx, talitos_alg);
3050}
3051
3052static int talitos_cra_init_aead(struct crypto_aead *tfm)
3053{
3054	struct aead_alg *alg = crypto_aead_alg(tfm);
3055	struct talitos_crypto_alg *talitos_alg;
3056	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3057
3058	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3059				   algt.alg.aead);
3060
3061	return talitos_init_common(ctx, talitos_alg);
3062}
3063
3064static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3065{
3066	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3067
3068	talitos_cra_init(tfm);
3069
3070	ctx->keylen = 0;
3071	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3072				 sizeof(struct talitos_ahash_req_ctx));
3073
3074	return 0;
3075}
3076
3077static void talitos_cra_exit(struct crypto_tfm *tfm)
3078{
3079	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3080	struct device *dev = ctx->dev;
3081
3082	if (ctx->keylen)
3083		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3084}
3085
3086/*
3087 * given the alg's descriptor header template, determine whether the
3088 * descriptor type and the required primary/secondary execution units
3089 * match the h/w capabilities described in the device tree node.
3090 */
3091static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3092{
3093	struct talitos_private *priv = dev_get_drvdata(dev);
3094	int ret;
3095
3096	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3097	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3098
3099	if (SECONDARY_EU(desc_hdr_template))
3100		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3101		              & priv->exec_units);
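	/*
	 * e.g. a template that selects AESU as primary and MDEUA as
	 * secondary EU is reported as supported only if both EU bits are
	 * set in priv->exec_units.
	 */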
3102
3103	return ret;
3104}
3105
3106static int talitos_remove(struct platform_device *ofdev)
3107{
3108	struct device *dev = &ofdev->dev;
3109	struct talitos_private *priv = dev_get_drvdata(dev);
3110	struct talitos_crypto_alg *t_alg, *n;
3111	int i;
3112
3113	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3114		switch (t_alg->algt.type) {
3115		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
3116			break;
3117		case CRYPTO_ALG_TYPE_AEAD:
3118			crypto_unregister_aead(&t_alg->algt.alg.aead);
3119			break;
3120		case CRYPTO_ALG_TYPE_AHASH:
3121			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3122			break;
3123		}
3124		list_del(&t_alg->entry);
3125	}
3126
3127	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3128		talitos_unregister_rng(dev);
3129
3130	for (i = 0; i < 2; i++)
3131		if (priv->irq[i]) {
3132			free_irq(priv->irq[i], dev);
3133			irq_dispose_mapping(priv->irq[i]);
3134		}
3135
3136	tasklet_kill(&priv->done_task[0]);
3137	if (priv->irq[1])
3138		tasklet_kill(&priv->done_task[1]);
3139
3140	return 0;
3141}
3142
3143static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3144						    struct talitos_alg_template
3145						           *template)
3146{
3147	struct talitos_private *priv = dev_get_drvdata(dev);
3148	struct talitos_crypto_alg *t_alg;
3149	struct crypto_alg *alg;
3150
3151	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3152			     GFP_KERNEL);
3153	if (!t_alg)
3154		return ERR_PTR(-ENOMEM);
3155
3156	t_alg->algt = *template;
3157
3158	switch (t_alg->algt.type) {
3159	case CRYPTO_ALG_TYPE_ABLKCIPHER:
3160		alg = &t_alg->algt.alg.crypto;
3161		alg->cra_init = talitos_cra_init;
3162		alg->cra_exit = talitos_cra_exit;
3163		alg->cra_type = &crypto_ablkcipher_type;
3164		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
3165					     ablkcipher_setkey;
3166		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3167		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3168		break;
3169	case CRYPTO_ALG_TYPE_AEAD:
3170		alg = &t_alg->algt.alg.aead.base;
3171		alg->cra_exit = talitos_cra_exit;
3172		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3173		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3174					      aead_setkey;
3175		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3176		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3177		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3178		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3179			devm_kfree(dev, t_alg);
3180			return ERR_PTR(-ENOTSUPP);
3181		}
3182		break;
3183	case CRYPTO_ALG_TYPE_AHASH:
3184		alg = &t_alg->algt.alg.hash.halg.base;
3185		alg->cra_init = talitos_cra_init_ahash;
3186		alg->cra_exit = talitos_cra_exit;
3187		t_alg->algt.alg.hash.init = ahash_init;
3188		t_alg->algt.alg.hash.update = ahash_update;
3189		t_alg->algt.alg.hash.final = ahash_final;
3190		t_alg->algt.alg.hash.finup = ahash_finup;
3191		t_alg->algt.alg.hash.digest = ahash_digest;
3192		if (!strncmp(alg->cra_name, "hmac", 4))
3193			t_alg->algt.alg.hash.setkey = ahash_setkey;
3194		t_alg->algt.alg.hash.import = ahash_import;
3195		t_alg->algt.alg.hash.export = ahash_export;
3196
3197		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3198		    !strncmp(alg->cra_name, "hmac", 4)) {
3199			devm_kfree(dev, t_alg);
3200			return ERR_PTR(-ENOTSUPP);
3201		}
3202		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3203		    (!strcmp(alg->cra_name, "sha224") ||
3204		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3205			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3206			t_alg->algt.desc_hdr_template =
3207					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3208					DESC_HDR_SEL0_MDEUA |
3209					DESC_HDR_MODE0_MDEU_SHA256;
3210		}
3211		break;
3212	default:
3213		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3214		devm_kfree(dev, t_alg);
3215		return ERR_PTR(-EINVAL);
3216	}
3217
3218	alg->cra_module = THIS_MODULE;
3219	if (t_alg->algt.priority)
3220		alg->cra_priority = t_alg->algt.priority;
3221	else
3222		alg->cra_priority = TALITOS_CRA_PRIORITY;
3223	if (has_ftr_sec1(priv))
3224		alg->cra_alignmask = 3;
3225	else
3226		alg->cra_alignmask = 0;
3227	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3228	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3229
3230	t_alg->dev = dev;
3231
3232	return t_alg;
3233}
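
/*
 * Note on the contract above: talitos_alg_alloc() hands back
 * ERR_PTR(-ENOTSUPP) for algorithms this hardware revision cannot run
 * (HMAC variants without TALITOS_FTR_HMAC_OK, sha224-based authencs
 * without TALITOS_FTR_SHA224_HWINIT); talitos_probe() below skips such
 * entries rather than failing the whole probe.
 */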
3234
3235static int talitos_probe_irq(struct platform_device *ofdev)
3236{
3237	struct device *dev = &ofdev->dev;
3238	struct device_node *np = ofdev->dev.of_node;
3239	struct talitos_private *priv = dev_get_drvdata(dev);
3240	int err;
3241	bool is_sec1 = has_ftr_sec1(priv);
3242
3243	priv->irq[0] = irq_of_parse_and_map(np, 0);
3244	if (!priv->irq[0]) {
3245		dev_err(dev, "failed to map irq\n");
3246		return -EINVAL;
3247	}
3248	if (is_sec1) {
3249		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3250				  dev_driver_string(dev), dev);
3251		goto primary_out;
3252	}
3253
3254	priv->irq[1] = irq_of_parse_and_map(np, 1);
3255
3256	/* get the primary irq line */
3257	if (!priv->irq[1]) {
3258		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3259				  dev_driver_string(dev), dev);
3260		goto primary_out;
3261	}
3262
3263	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3264			  dev_driver_string(dev), dev);
3265	if (err)
3266		goto primary_out;
3267
3268	/* get the secondary irq line */
3269	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3270			  dev_driver_string(dev), dev);
3271	if (err) {
3272		dev_err(dev, "failed to request secondary irq\n");
3273		irq_dispose_mapping(priv->irq[1]);
3274		priv->irq[1] = 0;
3275	}
3276
3277	return err;
3278
3279primary_out:
3280	if (err) {
3281		dev_err(dev, "failed to request primary irq\n");
3282		irq_dispose_mapping(priv->irq[0]);
3283		priv->irq[0] = 0;
3284	}
3285
3286	return err;
3287}
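
/*
 * IRQ topology handled above: SEC1 exposes a single interrupt covering
 * all channels; SEC2+ provides either one combined line (serviced by the
 * 4ch handler) or a pair of lines split as channels 0/2 and 1/3, in
 * which case priv->irq[1] stays nonzero and a second done tasklet is
 * set up by talitos_probe().
 */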
3288
3289static int talitos_probe(struct platform_device *ofdev)
3290{
3291	struct device *dev = &ofdev->dev;
3292	struct device_node *np = ofdev->dev.of_node;
3293	struct talitos_private *priv;
3294	int i, err;
3295	int stride;
3296	struct resource *res;
3297
3298	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3299	if (!priv)
3300		return -ENOMEM;
3301
3302	INIT_LIST_HEAD(&priv->alg_list);
3303
3304	dev_set_drvdata(dev, priv);
3305
3306	priv->ofdev = ofdev;
3307
3308	spin_lock_init(&priv->reg_lock);
3309
3310	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3311	if (!res)
3312		return -ENXIO;
3313	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3314	if (!priv->reg) {
3315		dev_err(dev, "failed to ioremap registers\n");
3316		err = -ENOMEM;
3317		goto err_out;
3318	}
3319
3320	/* get SEC version capabilities from device tree */
3321	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3322	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3323	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3324	of_property_read_u32(np, "fsl,descriptor-types-mask",
3325			     &priv->desc_types);
3326
3327	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3328	    !priv->exec_units || !priv->desc_types) {
3329		dev_err(dev, "invalid property data in device tree node\n");
3330		err = -EINVAL;
3331		goto err_out;
3332	}
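
	/*
	 * A minimal sketch of the node this expects (values here are
	 * illustrative, not authoritative for any particular SoC):
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.1", "fsl,sec2.0";
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0xfe>;
	 *		fsl,descriptor-types-mask = <0x12b0ebf>;
	 *	};
	 */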
3333
3334	if (of_device_is_compatible(np, "fsl,sec3.0"))
3335		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3336
3337	if (of_device_is_compatible(np, "fsl,sec2.1"))
3338		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3339				  TALITOS_FTR_SHA224_HWINIT |
3340				  TALITOS_FTR_HMAC_OK;
3341
3342	if (of_device_is_compatible(np, "fsl,sec1.0"))
3343		priv->features |= TALITOS_FTR_SEC1;
3344
3345	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3346		priv->reg_deu = priv->reg + TALITOS12_DEU;
3347		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3348		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3349		stride = TALITOS1_CH_STRIDE;
3350	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3351		priv->reg_deu = priv->reg + TALITOS10_DEU;
3352		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3353		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3354		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3355		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3356		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3357		stride = TALITOS1_CH_STRIDE;
3358	} else {
3359		priv->reg_deu = priv->reg + TALITOS2_DEU;
3360		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3361		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3362		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3363		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3364		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3365		priv->reg_keu = priv->reg + TALITOS2_KEU;
3366		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3367		stride = TALITOS2_CH_STRIDE;
3368	}
3369
3370	err = talitos_probe_irq(ofdev);
3371	if (err)
3372		goto err_out;
3373
3374	if (has_ftr_sec1(priv)) {
3375		if (priv->num_channels == 1)
3376			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3377				     (unsigned long)dev);
3378		else
3379			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3380				     (unsigned long)dev);
3381	} else {
3382		if (priv->irq[1]) {
3383			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3384				     (unsigned long)dev);
3385			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3386				     (unsigned long)dev);
3387		} else if (priv->num_channels == 1) {
3388			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3389				     (unsigned long)dev);
3390		} else {
3391			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3392				     (unsigned long)dev);
3393		}
3394	}
3395
3396	priv->chan = devm_kcalloc(dev,
3397				  priv->num_channels,
3398				  sizeof(struct talitos_channel),
3399				  GFP_KERNEL);
3400	if (!priv->chan) {
3401		dev_err(dev, "failed to allocate channel management space\n");
3402		err = -ENOMEM;
3403		goto err_out;
3404	}
3405
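	/*
	 * The ring bookkeeping masks head/tail indices with
	 * (fifo_len - 1), so the allocated depth must be a power of two
	 * even when the hardware channel FIFO length itself is not.
	 */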
3406	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3407
3408	for (i = 0; i < priv->num_channels; i++) {
3409		priv->chan[i].reg = priv->reg + stride * (i + 1);
3410		if (!priv->irq[1] || !(i & 1))
3411			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3412
3413		spin_lock_init(&priv->chan[i].head_lock);
3414		spin_lock_init(&priv->chan[i].tail_lock);
3415
3416		priv->chan[i].fifo = devm_kcalloc(dev,
3417						priv->fifo_len,
3418						sizeof(struct talitos_request),
3419						GFP_KERNEL);
3420		if (!priv->chan[i].fifo) {
3421			dev_err(dev, "failed to allocate request fifo %d\n", i);
3422			err = -ENOMEM;
3423			goto err_out;
3424		}
3425
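		/*
		 * Bias the counter negative so it only turns positive
		 * once more than chfifo_len - 1 requests are in flight;
		 * the submit path treats a positive value as a full
		 * channel and backs off with -EAGAIN.
		 */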
3426		atomic_set(&priv->chan[i].submit_count,
3427			   -(priv->chfifo_len - 1));
3428	}
3429
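	/*
	 * SEC2+ descriptors carry 36-bit bus addresses (the extended
	 * pointer field holds the upper bits), hence the 36-bit DMA mask.
	 */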
3430	dma_set_mask(dev, DMA_BIT_MASK(36));
3431
3432	/* reset and initialize the h/w */
3433	err = init_device(dev);
3434	if (err) {
3435		dev_err(dev, "failed to initialize device\n");
3436		goto err_out;
3437	}
3438
3439	/* register the RNG, if available */
3440	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3441		err = talitos_register_rng(dev);
3442		if (err) {
3443			dev_err(dev, "failed to register hwrng: %d\n", err);
3444			goto err_out;
3445		} else
3446			dev_info(dev, "hwrng\n");
3447	}
3448
3449	/* register crypto algorithms the device supports */
3450	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3451		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3452			struct talitos_crypto_alg *t_alg;
3453			struct crypto_alg *alg = NULL;
3454
3455			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3456			if (IS_ERR(t_alg)) {
3457				err = PTR_ERR(t_alg);
3458				if (err == -ENOTSUPP)
3459					continue;
3460				goto err_out;
3461			}
3462
3463			switch (t_alg->algt.type) {
3464			case CRYPTO_ALG_TYPE_ABLKCIPHER:
3465				err = crypto_register_alg(
3466						&t_alg->algt.alg.crypto);
3467				alg = &t_alg->algt.alg.crypto;
3468				break;
3469
3470			case CRYPTO_ALG_TYPE_AEAD:
3471				err = crypto_register_aead(
3472					&t_alg->algt.alg.aead);
3473				alg = &t_alg->algt.alg.aead.base;
3474				break;
3475
3476			case CRYPTO_ALG_TYPE_AHASH:
3477				err = crypto_register_ahash(
3478						&t_alg->algt.alg.hash);
3479				alg = &t_alg->algt.alg.hash.halg.base;
3480				break;
3481			}
3482			if (err) {
3483				dev_err(dev, "%s alg registration failed\n",
3484					alg->cra_driver_name);
3485				devm_kfree(dev, t_alg);
3486			} else
3487				list_add_tail(&t_alg->entry, &priv->alg_list);
3488		}
3489	}
3490	if (!list_empty(&priv->alg_list))
3491		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3492			 (char *)of_get_property(np, "compatible", NULL));
3493
3494	return 0;
3495
3496err_out:
3497	talitos_remove(ofdev);
3498
3499	return err;
3500}
3501
3502static const struct of_device_id talitos_match[] = {
3503#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3504	{
3505		.compatible = "fsl,sec1.0",
3506	},
3507#endif
3508#ifdef CONFIG_CRYPTO_DEV_TALITOS2
3509	{
3510		.compatible = "fsl,sec2.0",
3511	},
3512#endif
3513	{},
3514};
3515MODULE_DEVICE_TABLE(of, talitos_match);
3516
3517static struct platform_driver talitos_driver = {
3518	.driver = {
3519		.name = "talitos",
3520		.of_match_table = talitos_match,
3521	},
3522	.probe = talitos_probe,
3523	.remove = talitos_remove,
3524};
3525
3526module_platform_driver(talitos_driver);
3527
3528MODULE_LICENSE("GPL");
3529MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3530MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");