   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * talitos - Freescale Integrated Security Engine (SEC) device driver
   4 *
   5 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
   6 *
   7 * Scatterlist Crypto API glue code copied from files with the following:
   8 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
   9 *
  10 * Crypto algorithm registration code copied from hifn driver:
  11 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
  12 * All rights reserved.
  13 */
  14
  15#include <linux/kernel.h>
  16#include <linux/module.h>
  17#include <linux/mod_devicetable.h>
  18#include <linux/device.h>
  19#include <linux/interrupt.h>
  20#include <linux/crypto.h>
  21#include <linux/hw_random.h>
  22#include <linux/of_address.h>
  23#include <linux/of_irq.h>
  24#include <linux/of_platform.h>
  25#include <linux/dma-mapping.h>
  26#include <linux/io.h>
  27#include <linux/spinlock.h>
  28#include <linux/rtnetlink.h>
  29#include <linux/slab.h>
  30
  31#include <crypto/algapi.h>
  32#include <crypto/aes.h>
  33#include <crypto/internal/des.h>
  34#include <crypto/sha.h>
  35#include <crypto/md5.h>
  36#include <crypto/internal/aead.h>
  37#include <crypto/authenc.h>
  38#include <crypto/internal/skcipher.h>
  39#include <crypto/hash.h>
  40#include <crypto/internal/hash.h>
  41#include <crypto/scatterwalk.h>
  42
  43#include "talitos.h"
  44
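/*
 * The helpers below hide the two hardware pointer layouts: SEC2/3
 * descriptor pointers carry a 16-bit length, a j_extent byte and an
 * extra pointer byte (eptr) for 36-bit addressing, while SEC1 packs
 * the length into len1 and has no eptr or extent field, which is why
 * the is_sec1 paths skip those members.
 */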
  45static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
  46			   unsigned int len, bool is_sec1)
  47{
  48	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
  49	if (is_sec1) {
  50		ptr->len1 = cpu_to_be16(len);
  51	} else {
  52		ptr->len = cpu_to_be16(len);
  53		ptr->eptr = upper_32_bits(dma_addr);
  54	}
  55}
  56
  57static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
  58			     struct talitos_ptr *src_ptr, bool is_sec1)
  59{
  60	dst_ptr->ptr = src_ptr->ptr;
  61	if (is_sec1) {
  62		dst_ptr->len1 = src_ptr->len1;
  63	} else {
  64		dst_ptr->len = src_ptr->len;
  65		dst_ptr->eptr = src_ptr->eptr;
  66	}
  67}
  68
  69static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
  70					   bool is_sec1)
  71{
  72	if (is_sec1)
  73		return be16_to_cpu(ptr->len1);
  74	else
  75		return be16_to_cpu(ptr->len);
  76}
  77
  78static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
  79				   bool is_sec1)
  80{
  81	if (!is_sec1)
  82		ptr->j_extent = val;
  83}
  84
  85static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
  86{
  87	if (!is_sec1)
  88		ptr->j_extent |= val;
  89}
  90
  91/*
  92 * map virtual single (contiguous) pointer to h/w descriptor pointer
  93 */
  94static void __map_single_talitos_ptr(struct device *dev,
  95				     struct talitos_ptr *ptr,
  96				     unsigned int len, void *data,
  97				     enum dma_data_direction dir,
  98				     unsigned long attrs)
  99{
 100	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
 101	struct talitos_private *priv = dev_get_drvdata(dev);
 102	bool is_sec1 = has_ftr_sec1(priv);
 103
 104	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
 105}
 106
 107static void map_single_talitos_ptr(struct device *dev,
 108				   struct talitos_ptr *ptr,
 109				   unsigned int len, void *data,
 110				   enum dma_data_direction dir)
 111{
 112	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
 113}
 114
 115static void map_single_talitos_ptr_nosync(struct device *dev,
 116					  struct talitos_ptr *ptr,
 117					  unsigned int len, void *data,
 118					  enum dma_data_direction dir)
 119{
 120	__map_single_talitos_ptr(dev, ptr, len, data, dir,
 121				 DMA_ATTR_SKIP_CPU_SYNC);
 122}
 123
 124/*
 125 * unmap bus single (contiguous) h/w descriptor pointer
 126 */
 127static void unmap_single_talitos_ptr(struct device *dev,
 128				     struct talitos_ptr *ptr,
 129				     enum dma_data_direction dir)
 130{
 131	struct talitos_private *priv = dev_get_drvdata(dev);
 132	bool is_sec1 = has_ftr_sec1(priv);
 133
 134	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
 135			 from_talitos_ptr_len(ptr, is_sec1), dir);
 136}
 137
 138static int reset_channel(struct device *dev, int ch)
 139{
 140	struct talitos_private *priv = dev_get_drvdata(dev);
 141	unsigned int timeout = TALITOS_TIMEOUT;
 142	bool is_sec1 = has_ftr_sec1(priv);
 143
 144	if (is_sec1) {
 145		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 146			  TALITOS1_CCCR_LO_RESET);
 147
 148		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
 149			TALITOS1_CCCR_LO_RESET) && --timeout)
 150			cpu_relax();
 151	} else {
 152		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
 153			  TALITOS2_CCCR_RESET);
 154
 155		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
 156			TALITOS2_CCCR_RESET) && --timeout)
 157			cpu_relax();
 158	}
 159
 160	if (timeout == 0) {
 161		dev_err(dev, "failed to reset channel %d\n", ch);
 162		return -EIO;
 163	}
 164
 165	/* set 36-bit addressing, done writeback enable and done IRQ enable */
 166	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
 167		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
 168	/* enable chaining descriptors */
 169	if (is_sec1)
 170		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 171			  TALITOS_CCCR_LO_NE);
 172
 173	/* and ICCR writeback, if available */
 174	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 175		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 176		          TALITOS_CCCR_LO_IWSE);
 177
 178	return 0;
 179}
 180
 181static int reset_device(struct device *dev)
 182{
 183	struct talitos_private *priv = dev_get_drvdata(dev);
 184	unsigned int timeout = TALITOS_TIMEOUT;
 185	bool is_sec1 = has_ftr_sec1(priv);
 186	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
 187
 188	setbits32(priv->reg + TALITOS_MCR, mcr);
 189
 190	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
 191	       && --timeout)
 192		cpu_relax();
 193
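	/*
	 * On parts wired with a second IRQ line, the RCA bits steer two
	 * of the four channels to the other interrupt output, matching
	 * the split ch0_2/ch1_3 handlers registered for dual-IRQ devices.
	 */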
 194	if (priv->irq[1]) {
 195		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
 196		setbits32(priv->reg + TALITOS_MCR, mcr);
 197	}
 198
 199	if (timeout == 0) {
 200		dev_err(dev, "failed to reset device\n");
 201		return -EIO;
 202	}
 203
 204	return 0;
 205}
 206
 207/*
 208 * Reset and initialize the device
 209 */
 210static int init_device(struct device *dev)
 211{
 212	struct talitos_private *priv = dev_get_drvdata(dev);
 213	int ch, err;
 214	bool is_sec1 = has_ftr_sec1(priv);
 215
 216	/*
 217	 * Master reset
 218	 * errata documentation: warning: certain SEC interrupts
 219	 * are not fully cleared by writing the MCR:SWR bit,
 220	 * set bit twice to completely reset
 221	 */
 222	err = reset_device(dev);
 223	if (err)
 224		return err;
 225
 226	err = reset_device(dev);
 227	if (err)
 228		return err;
 229
 230	/* reset channels */
 231	for (ch = 0; ch < priv->num_channels; ch++) {
 232		err = reset_channel(dev, ch);
 233		if (err)
 234			return err;
 235	}
 236
 237	/* enable channel done and error interrupts */
 238	if (is_sec1) {
 239		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
 240		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
 241		/* disable parity error check in DEU (erroneous? test vect.) */
 242		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
 243	} else {
 244		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
 245		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
 246	}
 247
 248	/* disable integrity check error interrupts (use writeback instead) */
 249	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 250		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
 251		          TALITOS_MDEUICR_LO_ICE);
 252
 253	return 0;
 254}
 255
 256/**
 257 * talitos_submit - submits a descriptor to the device for processing
 258 * @dev:	the SEC device to be used
 259 * @ch:		the SEC device channel to be used
 260 * @desc:	the descriptor to be processed by the device
 261 * @callback:	whom to call when processing is complete
 262 * @context:	a handle for use by caller (optional)
 263 *
 264 * desc must contain valid dma-mapped (bus physical) address pointers.
 265 * callback must check err and feedback in descriptor header
 266 * for device processing status.
 267 */
 268static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
 269			  void (*callback)(struct device *dev,
 270					   struct talitos_desc *desc,
 271					   void *context, int error),
 272			  void *context)
 273{
 274	struct talitos_private *priv = dev_get_drvdata(dev);
 275	struct talitos_request *request;
 276	unsigned long flags;
 277	int head;
 278	bool is_sec1 = has_ftr_sec1(priv);
 279
 280	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
 281
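	/*
	 * submit_count starts out negative (set at channel init,
	 * elsewhere in the driver) and counts up towards zero as
	 * requests are queued; once it would reach zero the h/w fifo
	 * is full and the request is bounced with -EAGAIN.
	 */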
 282	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
 283		/* h/w fifo is full */
 284		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 285		return -EAGAIN;
 286	}
 287
 288	head = priv->chan[ch].head;
 289	request = &priv->chan[ch].fifo[head];
 290
 291	/* map descriptor and save caller data */
 292	if (is_sec1) {
 293		desc->hdr1 = desc->hdr;
 294		request->dma_desc = dma_map_single(dev, &desc->hdr1,
 295						   TALITOS_DESC_SIZE,
 296						   DMA_BIDIRECTIONAL);
 297	} else {
 298		request->dma_desc = dma_map_single(dev, desc,
 299						   TALITOS_DESC_SIZE,
 300						   DMA_BIDIRECTIONAL);
 301	}
 302	request->callback = callback;
 303	request->context = context;
 304
 305	/* increment fifo head */
 306	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
 307
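	/*
	 * Publish the request: smp_wmb() orders the bookkeeping stores
	 * above before the request->desc store, which is what
	 * flush_channel() polls (paired with its rmb()) to decide
	 * whether a fifo slot holds a live request.
	 */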
 308	smp_wmb();
 309	request->desc = desc;
 310
 311	/* GO! */
 312	wmb();
 313	out_be32(priv->chan[ch].reg + TALITOS_FF,
 314		 upper_32_bits(request->dma_desc));
 315	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
 316		 lower_32_bits(request->dma_desc));
 317
 318	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 319
 320	return -EINPROGRESS;
 321}
 322
 323static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
 324{
 325	struct talitos_edesc *edesc;
 326
 327	if (!is_sec1)
 328		return request->desc->hdr;
 329
 330	if (!request->desc->next_desc)
 331		return request->desc->hdr1;
 332
 333	edesc = container_of(request->desc, struct talitos_edesc, desc);
 334
 335	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
 336}
 337
 338/*
  339 * process what was done, notify callback of error if not done
 340 */
 341static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 342{
 343	struct talitos_private *priv = dev_get_drvdata(dev);
 344	struct talitos_request *request, saved_req;
 345	unsigned long flags;
 346	int tail, status;
 347	bool is_sec1 = has_ftr_sec1(priv);
 348
 349	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 350
 351	tail = priv->chan[ch].tail;
 352	while (priv->chan[ch].fifo[tail].desc) {
 353		__be32 hdr;
 354
 355		request = &priv->chan[ch].fifo[tail];
 356
 357		/* descriptors with their done bits set don't get the error */
 358		rmb();
 359		hdr = get_request_hdr(request, is_sec1);
 360
 361		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
 362			status = 0;
 363		else
 364			if (!error)
 365				break;
 366			else
 367				status = error;
 368
 369		dma_unmap_single(dev, request->dma_desc,
 370				 TALITOS_DESC_SIZE,
 371				 DMA_BIDIRECTIONAL);
 372
 373		/* copy entries so we can call callback outside lock */
 374		saved_req.desc = request->desc;
 375		saved_req.callback = request->callback;
 376		saved_req.context = request->context;
 377
 378		/* release request entry in fifo */
 379		smp_wmb();
 380		request->desc = NULL;
 381
 382		/* increment fifo tail */
 383		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
 384
 385		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 386
 387		atomic_dec(&priv->chan[ch].submit_count);
 388
 389		saved_req.callback(dev, saved_req.desc, saved_req.context,
 390				   status);
 391		/* channel may resume processing in single desc error case */
 392		if (error && !reset_ch && status == error)
 393			return;
 394		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 395		tail = priv->chan[ch].tail;
 396	}
 397
 398	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 399}
 400
 401/*
 402 * process completed requests for channels that have done status
 403 */
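/*
 * SEC1 scatters the per-channel done bits across the ISR:
 * 0x10000000 (bit 28) is channel 0, 0x40000000 (bit 30) channel 1,
 * 0x00010000 (bit 16) channel 2 and 0x00040000 (bit 18) channel 3.
 * Note also that the SEC1 IMR is set-to-mask, so the tasklet
 * re-enables done interrupts with clrbits32().
 */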
 404#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
 405static void talitos1_done_##name(unsigned long data)			\
 406{									\
 407	struct device *dev = (struct device *)data;			\
 408	struct talitos_private *priv = dev_get_drvdata(dev);		\
 409	unsigned long flags;						\
 410									\
 411	if (ch_done_mask & 0x10000000)					\
 412		flush_channel(dev, 0, 0, 0);			\
 413	if (ch_done_mask & 0x40000000)					\
 414		flush_channel(dev, 1, 0, 0);			\
 415	if (ch_done_mask & 0x00010000)					\
 416		flush_channel(dev, 2, 0, 0);			\
 417	if (ch_done_mask & 0x00040000)					\
 418		flush_channel(dev, 3, 0, 0);			\
 419									\
 420	/* At this point, all completed channels have been processed */	\
 421	/* Unmask done interrupts for channels completed later on. */	\
 422	spin_lock_irqsave(&priv->reg_lock, flags);			\
 423	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
 424	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
 425	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
 426}
 427
 428DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
 429DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
 430
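/*
 * SEC2+ keeps the done bits at even positions (bit 0 = channel 0,
 * bit 2 = channel 1, ...) and its IMR has the opposite sense to
 * SEC1: a set bit enables the interrupt, hence setbits32() below to
 * re-enable what the interrupt handler masked.
 */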
 431#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
 432static void talitos2_done_##name(unsigned long data)			\
 433{									\
 434	struct device *dev = (struct device *)data;			\
 435	struct talitos_private *priv = dev_get_drvdata(dev);		\
 436	unsigned long flags;						\
 437									\
 438	if (ch_done_mask & 1)						\
 439		flush_channel(dev, 0, 0, 0);				\
 440	if (ch_done_mask & (1 << 2))					\
 441		flush_channel(dev, 1, 0, 0);				\
 442	if (ch_done_mask & (1 << 4))					\
 443		flush_channel(dev, 2, 0, 0);				\
 444	if (ch_done_mask & (1 << 6))					\
 445		flush_channel(dev, 3, 0, 0);				\
 446									\
 447	/* At this point, all completed channels have been processed */	\
 448	/* Unmask done interrupts for channels completed later on. */	\
 449	spin_lock_irqsave(&priv->reg_lock, flags);			\
 450	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
 451	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
 452	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
 453}
 454
 455DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
 456DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
 457DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
 458DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
 459
 460/*
 461 * locate current (offending) descriptor
 462 */
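/*
 * CDPR/CDPR_LO hold the bus address of the descriptor the channel
 * was processing.  The fifo is walked from the tail, matching either
 * a request's own descriptor or, for chained requests, its
 * next_desc, whose header then lives past the link tables in
 * edesc->buf.
 */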
 463static u32 current_desc_hdr(struct device *dev, int ch)
 464{
 465	struct talitos_private *priv = dev_get_drvdata(dev);
 466	int tail, iter;
 467	dma_addr_t cur_desc;
 468
 469	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
 470	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
 471
 472	if (!cur_desc) {
 473		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
 474		return 0;
 475	}
 476
 477	tail = priv->chan[ch].tail;
 478
 479	iter = tail;
 480	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
 481	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
 482		iter = (iter + 1) & (priv->fifo_len - 1);
 483		if (iter == tail) {
 484			dev_err(dev, "couldn't locate current descriptor\n");
 485			return 0;
 486		}
 487	}
 488
 489	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
 490		struct talitos_edesc *edesc;
 491
 492		edesc = container_of(priv->chan[ch].fifo[iter].desc,
 493				     struct talitos_edesc, desc);
 494		return ((struct talitos_desc *)
 495			(edesc->buf + edesc->dma_len))->hdr;
 496	}
 497
 498	return priv->chan[ch].fifo[iter].desc->hdr;
 499}
 500
 501/*
 502 * user diagnostics; report root cause of error based on execution unit status
 503 */
 504static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
 505{
 506	struct talitos_private *priv = dev_get_drvdata(dev);
 507	int i;
 508
 509	if (!desc_hdr)
 510		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
 511
 512	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
 513	case DESC_HDR_SEL0_AFEU:
 514		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
 515			in_be32(priv->reg_afeu + TALITOS_EUISR),
 516			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
 517		break;
 518	case DESC_HDR_SEL0_DEU:
 519		dev_err(dev, "DEUISR 0x%08x_%08x\n",
 520			in_be32(priv->reg_deu + TALITOS_EUISR),
 521			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
 522		break;
 523	case DESC_HDR_SEL0_MDEUA:
 524	case DESC_HDR_SEL0_MDEUB:
 525		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 526			in_be32(priv->reg_mdeu + TALITOS_EUISR),
 527			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
 528		break;
 529	case DESC_HDR_SEL0_RNG:
 530		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
 531			in_be32(priv->reg_rngu + TALITOS_ISR),
 532			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
 533		break;
 534	case DESC_HDR_SEL0_PKEU:
 535		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
 536			in_be32(priv->reg_pkeu + TALITOS_EUISR),
 537			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
 538		break;
 539	case DESC_HDR_SEL0_AESU:
 540		dev_err(dev, "AESUISR 0x%08x_%08x\n",
 541			in_be32(priv->reg_aesu + TALITOS_EUISR),
 542			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
 543		break;
 544	case DESC_HDR_SEL0_CRCU:
 545		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 546			in_be32(priv->reg_crcu + TALITOS_EUISR),
 547			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
 548		break;
 549	case DESC_HDR_SEL0_KEU:
 550		dev_err(dev, "KEUISR 0x%08x_%08x\n",
 551			in_be32(priv->reg_pkeu + TALITOS_EUISR),
 552			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
 553		break;
 554	}
 555
 556	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
 557	case DESC_HDR_SEL1_MDEUA:
 558	case DESC_HDR_SEL1_MDEUB:
 559		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 560			in_be32(priv->reg_mdeu + TALITOS_EUISR),
 561			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
 562		break;
 563	case DESC_HDR_SEL1_CRCU:
 564		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 565			in_be32(priv->reg_crcu + TALITOS_EUISR),
 566			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
 567		break;
 568	}
 569
 570	for (i = 0; i < 8; i++)
 571		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
 572			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
 573			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
 574}
 575
 576/*
 577 * recover from error interrupts
 578 */
 579static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
 580{
 581	struct talitos_private *priv = dev_get_drvdata(dev);
 582	unsigned int timeout = TALITOS_TIMEOUT;
 583	int ch, error, reset_dev = 0;
 584	u32 v_lo;
 585	bool is_sec1 = has_ftr_sec1(priv);
 586	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
 587
 588	for (ch = 0; ch < priv->num_channels; ch++) {
 589		/* skip channels without errors */
 590		if (is_sec1) {
 591			/* bits 29, 31, 17, 19 */
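			/*
			 * ch=0: 29+0-0  = 29  ch=1: 29+2-0  = 31
			 * ch=2: 29+0-12 = 17  ch=3: 29+2-12 = 19
			 */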
 592			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
 593				continue;
 594		} else {
 595			if (!(isr & (1 << (ch * 2 + 1))))
 596				continue;
 597		}
 598
 599		error = -EINVAL;
 600
 601		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
 602
 603		if (v_lo & TALITOS_CCPSR_LO_DOF) {
 604			dev_err(dev, "double fetch fifo overflow error\n");
 605			error = -EAGAIN;
 606			reset_ch = 1;
 607		}
 608		if (v_lo & TALITOS_CCPSR_LO_SOF) {
 609			/* h/w dropped descriptor */
 610			dev_err(dev, "single fetch fifo overflow error\n");
 611			error = -EAGAIN;
 612		}
 613		if (v_lo & TALITOS_CCPSR_LO_MDTE)
 614			dev_err(dev, "master data transfer error\n");
 615		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
 616			dev_err(dev, is_sec1 ? "pointer not complete error\n"
 617					     : "s/g data length zero error\n");
 618		if (v_lo & TALITOS_CCPSR_LO_FPZ)
 619			dev_err(dev, is_sec1 ? "parity error\n"
 620					     : "fetch pointer zero error\n");
 621		if (v_lo & TALITOS_CCPSR_LO_IDH)
 622			dev_err(dev, "illegal descriptor header error\n");
 623		if (v_lo & TALITOS_CCPSR_LO_IEU)
 624			dev_err(dev, is_sec1 ? "static assignment error\n"
 625					     : "invalid exec unit error\n");
 626		if (v_lo & TALITOS_CCPSR_LO_EU)
 627			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
 628		if (!is_sec1) {
 629			if (v_lo & TALITOS_CCPSR_LO_GB)
 630				dev_err(dev, "gather boundary error\n");
 631			if (v_lo & TALITOS_CCPSR_LO_GRL)
 632				dev_err(dev, "gather return/length error\n");
 633			if (v_lo & TALITOS_CCPSR_LO_SB)
 634				dev_err(dev, "scatter boundary error\n");
 635			if (v_lo & TALITOS_CCPSR_LO_SRL)
 636				dev_err(dev, "scatter return/length error\n");
 637		}
 638
 639		flush_channel(dev, ch, error, reset_ch);
 640
 641		if (reset_ch) {
 642			reset_channel(dev, ch);
 643		} else {
 644			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
 645				  TALITOS2_CCCR_CONT);
 646			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
 647			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
 648			       TALITOS2_CCCR_CONT) && --timeout)
 649				cpu_relax();
 650			if (timeout == 0) {
 651				dev_err(dev, "failed to restart channel %d\n",
 652					ch);
 653				reset_dev = 1;
 654			}
 655		}
 656	}
 657	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
 658	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
 659		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
 660			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
 661				isr, isr_lo);
 662		else
 663			dev_err(dev, "done overflow, internal time out, or "
 664				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
 665
 666		/* purge request queues */
 667		for (ch = 0; ch < priv->num_channels; ch++)
 668			flush_channel(dev, ch, -EIO, 1);
 669
 670		/* reset and reinitialize the device */
 671		init_device(dev);
 672	}
 673}
 674
 675#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
 676static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
 677{									       \
 678	struct device *dev = data;					       \
 679	struct talitos_private *priv = dev_get_drvdata(dev);		       \
 680	u32 isr, isr_lo;						       \
 681	unsigned long flags;						       \
 682									       \
 683	spin_lock_irqsave(&priv->reg_lock, flags);			       \
 684	isr = in_be32(priv->reg + TALITOS_ISR);				       \
 685	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
 686	/* Acknowledge interrupt */					       \
 687	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
 688	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
 689									       \
 690	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
 691		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 692		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
 693	}								       \
 694	else {								       \
 695		if (likely(isr & ch_done_mask)) {			       \
 696			/* mask further done interrupts. */		       \
 697			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
 698			/* done_task will unmask done interrupts at exit */    \
 699			tasklet_schedule(&priv->done_task[tlet]);	       \
 700		}							       \
 701		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 702	}								       \
 703									       \
 704	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
 705								IRQ_NONE;      \
 706}
 707
 708DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
 709
 710#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
 711static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
 712{									       \
 713	struct device *dev = data;					       \
 714	struct talitos_private *priv = dev_get_drvdata(dev);		       \
 715	u32 isr, isr_lo;						       \
 716	unsigned long flags;						       \
 717									       \
 718	spin_lock_irqsave(&priv->reg_lock, flags);			       \
 719	isr = in_be32(priv->reg + TALITOS_ISR);				       \
 720	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
 721	/* Acknowledge interrupt */					       \
 722	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
 723	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
 724									       \
 725	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
 726		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 727		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
 728	}								       \
 729	else {								       \
 730		if (likely(isr & ch_done_mask)) {			       \
 731			/* mask further done interrupts. */		       \
 732			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
 733			/* done_task will unmask done interrupts at exit */    \
 734			tasklet_schedule(&priv->done_task[tlet]);	       \
 735		}							       \
 736		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 737	}								       \
 738									       \
 739	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
 740								IRQ_NONE;      \
 741}
 742
 743DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
 744DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
 745		       0)
 746DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
 747		       1)
 748
 749/*
 750 * hwrng
 751 */
 752static int talitos_rng_data_present(struct hwrng *rng, int wait)
 753{
 754	struct device *dev = (struct device *)rng->priv;
 755	struct talitos_private *priv = dev_get_drvdata(dev);
 756	u32 ofl;
 757	int i;
 758
 759	for (i = 0; i < 20; i++) {
 760		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
 761		      TALITOS_RNGUSR_LO_OFL;
 762		if (ofl || !wait)
 763			break;
 764		udelay(10);
 765	}
 766
 767	return !!ofl;
 768}
 769
 770static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
 771{
 772	struct device *dev = (struct device *)rng->priv;
 773	struct talitos_private *priv = dev_get_drvdata(dev);
 774
 775	/* rng fifo requires 64-bit accesses */
 776	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
 777	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
 778
 779	return sizeof(u32);
 780}
 781
 782static int talitos_rng_init(struct hwrng *rng)
 783{
 784	struct device *dev = (struct device *)rng->priv;
 785	struct talitos_private *priv = dev_get_drvdata(dev);
 786	unsigned int timeout = TALITOS_TIMEOUT;
 787
 788	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
 789	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
 790		 & TALITOS_RNGUSR_LO_RD)
 791	       && --timeout)
 792		cpu_relax();
 793	if (timeout == 0) {
 794		dev_err(dev, "failed to reset rng hw\n");
 795		return -ENODEV;
 796	}
 797
 798	/* start generating */
 799	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
 800
 801	return 0;
 802}
 803
 804static int talitos_register_rng(struct device *dev)
 805{
 806	struct talitos_private *priv = dev_get_drvdata(dev);
 807	int err;
 808
 809	priv->rng.name		= dev_driver_string(dev),
 810	priv->rng.init		= talitos_rng_init,
 811	priv->rng.data_present	= talitos_rng_data_present,
 812	priv->rng.data_read	= talitos_rng_data_read,
 813	priv->rng.priv		= (unsigned long)dev;
 814
 815	err = hwrng_register(&priv->rng);
 816	if (!err)
 817		priv->rng_registered = true;
 818
 819	return err;
 820}
 821
 822static void talitos_unregister_rng(struct device *dev)
 823{
 824	struct talitos_private *priv = dev_get_drvdata(dev);
 825
 826	if (!priv->rng_registered)
 827		return;
 828
 829	hwrng_unregister(&priv->rng);
 830	priv->rng_registered = false;
 831}
 832
 833/*
 834 * crypto alg
 835 */
 836#define TALITOS_CRA_PRIORITY		3000
 837/*
 838 * Defines a priority for doing AEAD with descriptors type
  839 * HMAC_SNOOP_NO_AFEU (HSNA) instead of type IPSEC_ESP
 840 */
 841#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
 842#ifdef CONFIG_CRYPTO_DEV_TALITOS2
 843#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
 844#else
 845#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
 846#endif
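/*
 * The key buffer must hold the cipher key plus an HMAC key as large as
 * the biggest supported digest block; SHA512 is only present on SEC2+
 * parts, so SEC1-only builds get by with the SHA256 block size.
 */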
 847#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 848
 849struct talitos_ctx {
 850	struct device *dev;
 851	int ch;
 852	__be32 desc_hdr_template;
 853	u8 key[TALITOS_MAX_KEY_SIZE];
 854	u8 iv[TALITOS_MAX_IV_LENGTH];
 855	dma_addr_t dma_key;
 856	unsigned int keylen;
 857	unsigned int enckeylen;
 858	unsigned int authkeylen;
 859};
 860
 861#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
 862#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
 863
 864struct talitos_ahash_req_ctx {
 865	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 866	unsigned int hw_context_size;
 867	u8 buf[2][HASH_MAX_BLOCK_SIZE];
 868	int buf_idx;
 869	unsigned int swinit;
 870	unsigned int first;
 871	unsigned int last;
 872	unsigned int to_hash_later;
 873	unsigned int nbuf;
 874	struct scatterlist bufsl[2];
 875	struct scatterlist *psrc;
 876};
 877
 878struct talitos_export_state {
 879	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 880	u8 buf[HASH_MAX_BLOCK_SIZE];
 881	unsigned int swinit;
 882	unsigned int first;
 883	unsigned int last;
 884	unsigned int to_hash_later;
 885	unsigned int nbuf;
 886};
 887
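/*
 * authenc key layout in ctx->key: the HMAC key comes first,
 * immediately followed by the cipher key, and the whole blob is
 * DMA-mapped once; descriptors then point at dma_key for the auth
 * part and dma_key + authkeylen for the cipher part (see ipsec_esp()).
 */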
 888static int aead_setkey(struct crypto_aead *authenc,
 889		       const u8 *key, unsigned int keylen)
 890{
 891	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 892	struct device *dev = ctx->dev;
 893	struct crypto_authenc_keys keys;
 894
 895	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
 896		goto badkey;
 897
 898	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
 899		goto badkey;
 900
 901	if (ctx->keylen)
 902		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
 903
 904	memcpy(ctx->key, keys.authkey, keys.authkeylen);
 905	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
 906
 907	ctx->keylen = keys.authkeylen + keys.enckeylen;
 908	ctx->enckeylen = keys.enckeylen;
 909	ctx->authkeylen = keys.authkeylen;
 910	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
 911				      DMA_TO_DEVICE);
 912
 913	memzero_explicit(&keys, sizeof(keys));
 914	return 0;
 915
 916badkey:
 917	memzero_explicit(&keys, sizeof(keys));
 918	return -EINVAL;
 919}
 920
 921static int aead_des3_setkey(struct crypto_aead *authenc,
 922			    const u8 *key, unsigned int keylen)
 923{
 924	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 925	struct device *dev = ctx->dev;
 926	struct crypto_authenc_keys keys;
 927	int err;
 928
 929	err = crypto_authenc_extractkeys(&keys, key, keylen);
 930	if (unlikely(err))
 931		goto out;
 932
 933	err = -EINVAL;
 934	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
 935		goto out;
 936
 937	err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen);
 938	if (err)
 939		goto out;
 940
 941	if (ctx->keylen)
 942		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
 943
 944	memcpy(ctx->key, keys.authkey, keys.authkeylen);
 945	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
 946
 947	ctx->keylen = keys.authkeylen + keys.enckeylen;
 948	ctx->enckeylen = keys.enckeylen;
 949	ctx->authkeylen = keys.authkeylen;
 950	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
 951				      DMA_TO_DEVICE);
 952
 953out:
 954	memzero_explicit(&keys, sizeof(keys));
 955	return err;
 956}
 957
 958static void talitos_sg_unmap(struct device *dev,
 959			     struct talitos_edesc *edesc,
 960			     struct scatterlist *src,
 961			     struct scatterlist *dst,
 962			     unsigned int len, unsigned int offset)
 963{
 964	struct talitos_private *priv = dev_get_drvdata(dev);
 965	bool is_sec1 = has_ftr_sec1(priv);
 966	unsigned int src_nents = edesc->src_nents ? : 1;
 967	unsigned int dst_nents = edesc->dst_nents ? : 1;
 968
 969	if (is_sec1 && dst && dst_nents > 1) {
 970		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
 971					   len, DMA_FROM_DEVICE);
 972		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
 973				     offset);
 974	}
 975	if (src != dst) {
 976		if (src_nents == 1 || !is_sec1)
 977			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 978
 979		if (dst && (dst_nents == 1 || !is_sec1))
 980			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
 981	} else if (src_nents == 1 || !is_sec1) {
 982		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 983	}
 984}
 985
 986static void ipsec_esp_unmap(struct device *dev,
 987			    struct talitos_edesc *edesc,
 988			    struct aead_request *areq, bool encrypt)
 989{
 990	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
 991	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
 992	unsigned int ivsize = crypto_aead_ivsize(aead);
 993	unsigned int authsize = crypto_aead_authsize(aead);
 994	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
 995	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
 996	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
 997
 998	if (is_ipsec_esp)
 999		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1000					 DMA_FROM_DEVICE);
1001	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1002
1003	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1004			 cryptlen + authsize, areq->assoclen);
1005
1006	if (edesc->dma_len)
1007		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1008				 DMA_BIDIRECTIONAL);
1009
1010	if (!is_ipsec_esp) {
1011		unsigned int dst_nents = edesc->dst_nents ? : 1;
1012
1013		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1014				   areq->assoclen + cryptlen - ivsize);
1015	}
1016}
1017
1018/*
1019 * ipsec_esp descriptor callbacks
1020 */
1021static void ipsec_esp_encrypt_done(struct device *dev,
1022				   struct talitos_desc *desc, void *context,
1023				   int err)
1024{
1025	struct aead_request *areq = context;
1026	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1027	unsigned int ivsize = crypto_aead_ivsize(authenc);
1028	struct talitos_edesc *edesc;
1029
1030	edesc = container_of(desc, struct talitos_edesc, desc);
1031
1032	ipsec_esp_unmap(dev, edesc, areq, true);
1033
1034	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1035
1036	kfree(edesc);
1037
1038	aead_request_complete(areq, err);
1039}
1040
1041static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1042					  struct talitos_desc *desc,
1043					  void *context, int err)
1044{
1045	struct aead_request *req = context;
1046	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1047	unsigned int authsize = crypto_aead_authsize(authenc);
1048	struct talitos_edesc *edesc;
1049	char *oicv, *icv;
1050
1051	edesc = container_of(desc, struct talitos_edesc, desc);
1052
1053	ipsec_esp_unmap(dev, edesc, req, false);
1054
1055	if (!err) {
1056		/* auth check */
1057		oicv = edesc->buf + edesc->dma_len;
1058		icv = oicv - authsize;
1059
1060		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1061	}
1062
1063	kfree(edesc);
1064
1065	aead_request_complete(req, err);
1066}
1067
1068static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1069					  struct talitos_desc *desc,
1070					  void *context, int err)
1071{
1072	struct aead_request *req = context;
1073	struct talitos_edesc *edesc;
1074
1075	edesc = container_of(desc, struct talitos_edesc, desc);
1076
1077	ipsec_esp_unmap(dev, edesc, req, false);
1078
1079	/* check ICV auth status */
1080	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1081		     DESC_HDR_LO_ICCR1_PASS))
1082		err = -EBADMSG;
1083
1084	kfree(edesc);
1085
1086	aead_request_complete(req, err);
1087}
1088
1089/*
1090 * convert scatterlist to SEC h/w link table format
1091 * stop at cryptlen bytes
1092 */
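/*
 * elen counts extent bytes (e.g. the ICV) that follow the datalen
 * payload bytes; a segment straddling that boundary is split so the
 * payload and the extent get separate link-table entries, hence the
 * possible sg_count + 1 entries (n_sg) below.
 */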
1093static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1094				 unsigned int offset, int datalen, int elen,
1095				 struct talitos_ptr *link_tbl_ptr)
1096{
1097	int n_sg = elen ? sg_count + 1 : sg_count;
1098	int count = 0;
1099	int cryptlen = datalen + elen;
1100
1101	while (cryptlen && sg && n_sg--) {
1102		unsigned int len = sg_dma_len(sg);
1103
1104		if (offset >= len) {
1105			offset -= len;
1106			goto next;
1107		}
1108
1109		len -= offset;
1110
1111		if (len > cryptlen)
1112			len = cryptlen;
1113
1114		if (datalen > 0 && len > datalen) {
1115			to_talitos_ptr(link_tbl_ptr + count,
1116				       sg_dma_address(sg) + offset, datalen, 0);
1117			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1118			count++;
1119			len -= datalen;
1120			offset += datalen;
1121		}
1122		to_talitos_ptr(link_tbl_ptr + count,
1123			       sg_dma_address(sg) + offset, len, 0);
1124		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1125		count++;
1126		cryptlen -= len;
1127		datalen -= len;
1128		offset = 0;
1129
1130next:
1131		sg = sg_next(sg);
1132	}
1133
1134	/* tag end of link table */
1135	if (count > 0)
1136		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1137				       DESC_PTR_LNKTBL_RET, 0);
1138
1139	return count;
1140}
1141
1142static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1143			      unsigned int len, struct talitos_edesc *edesc,
1144			      struct talitos_ptr *ptr, int sg_count,
1145			      unsigned int offset, int tbl_off, int elen,
1146			      bool force)
1147{
1148	struct talitos_private *priv = dev_get_drvdata(dev);
1149	bool is_sec1 = has_ftr_sec1(priv);
1150
1151	if (!src) {
1152		to_talitos_ptr(ptr, 0, 0, is_sec1);
1153		return 1;
1154	}
1155	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1156	if (sg_count == 1 && !force) {
1157		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1158		return sg_count;
1159	}
1160	if (is_sec1) {
1161		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1162		return sg_count;
1163	}
1164	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1165					 &edesc->link_tbl[tbl_off]);
1166	if (sg_count == 1 && !force) {
 1167		/* Only one segment now, so no link tbl needed */
1168		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1169		return sg_count;
1170	}
1171	to_talitos_ptr(ptr, edesc->dma_link_tbl +
1172			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1173	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1174
1175	return sg_count;
1176}
1177
1178static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1179			  unsigned int len, struct talitos_edesc *edesc,
1180			  struct talitos_ptr *ptr, int sg_count,
1181			  unsigned int offset, int tbl_off)
1182{
1183	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1184				  tbl_off, 0, false);
1185}
1186
1187/*
1188 * fill in and submit ipsec_esp descriptor
1189 */
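/*
 * Descriptor pointer usage for AEAD:
 *   ptr[0] HMAC key, ptr[1] associated data, ptr[2]/ptr[3] cipher IV
 *   and cipher key (the two swap places for the non-IPSEC_ESP, HSNA
 *   descriptor type), ptr[4] cipher in, ptr[5] cipher out and ptr[6]
 *   ICV or IV out depending on direction and descriptor type.
 */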
1190static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1191		     bool encrypt,
1192		     void (*callback)(struct device *dev,
1193				      struct talitos_desc *desc,
1194				      void *context, int error))
1195{
1196	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1197	unsigned int authsize = crypto_aead_authsize(aead);
1198	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1199	struct device *dev = ctx->dev;
1200	struct talitos_desc *desc = &edesc->desc;
1201	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1202	unsigned int ivsize = crypto_aead_ivsize(aead);
1203	int tbl_off = 0;
1204	int sg_count, ret;
1205	int elen = 0;
1206	bool sync_needed = false;
1207	struct talitos_private *priv = dev_get_drvdata(dev);
1208	bool is_sec1 = has_ftr_sec1(priv);
1209	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1210	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1211	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1212	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
1213
1214	/* hmac key */
1215	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1216
1217	sg_count = edesc->src_nents ?: 1;
1218	if (is_sec1 && sg_count > 1)
1219		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1220				  areq->assoclen + cryptlen);
1221	else
1222		sg_count = dma_map_sg(dev, areq->src, sg_count,
1223				      (areq->src == areq->dst) ?
1224				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1225
1226	/* hmac data */
1227	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1228			     &desc->ptr[1], sg_count, 0, tbl_off);
1229
1230	if (ret > 1) {
1231		tbl_off += ret;
1232		sync_needed = true;
1233	}
1234
1235	/* cipher iv */
1236	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1237
1238	/* cipher key */
1239	to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1240		       ctx->enckeylen, is_sec1);
1241
1242	/*
1243	 * cipher in
1244	 * map and adjust cipher len to aead request cryptlen.
1245	 * extent is bytes of HMAC postpended to ciphertext,
1246	 * typically 12 for ipsec
1247	 */
1248	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1249		elen = authsize;
1250
1251	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1252				 sg_count, areq->assoclen, tbl_off, elen,
1253				 false);
1254
1255	if (ret > 1) {
1256		tbl_off += ret;
1257		sync_needed = true;
1258	}
1259
1260	/* cipher out */
1261	if (areq->src != areq->dst) {
1262		sg_count = edesc->dst_nents ? : 1;
1263		if (!is_sec1 || sg_count == 1)
1264			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1265	}
1266
1267	if (is_ipsec_esp && encrypt)
1268		elen = authsize;
1269	else
1270		elen = 0;
1271	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1272				 sg_count, areq->assoclen, tbl_off, elen,
1273				 is_ipsec_esp && !encrypt);
1274	tbl_off += ret;
1275
1276	if (!encrypt && is_ipsec_esp) {
1277		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1278
1279		/* Add an entry to the link table for ICV data */
1280		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1281		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1282
1283		/* icv data follows link tables */
1284		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1285		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1286		sync_needed = true;
1287	} else if (!encrypt) {
1288		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1289		sync_needed = true;
1290	} else if (!is_ipsec_esp) {
1291		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1292			       sg_count, areq->assoclen + cryptlen, tbl_off);
1293	}
1294
1295	/* iv out */
1296	if (is_ipsec_esp)
1297		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1298				       DMA_FROM_DEVICE);
1299
1300	if (sync_needed)
1301		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1302					   edesc->dma_len,
1303					   DMA_BIDIRECTIONAL);
1304
1305	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1306	if (ret != -EINPROGRESS) {
1307		ipsec_esp_unmap(dev, edesc, areq, encrypt);
1308		kfree(edesc);
1309	}
1310	return ret;
1311}
1312
1313/*
1314 * allocate and map the extended descriptor
1315 */
1316static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1317						 struct scatterlist *src,
1318						 struct scatterlist *dst,
1319						 u8 *iv,
1320						 unsigned int assoclen,
1321						 unsigned int cryptlen,
1322						 unsigned int authsize,
1323						 unsigned int ivsize,
1324						 int icv_stashing,
1325						 u32 cryptoflags,
1326						 bool encrypt)
1327{
1328	struct talitos_edesc *edesc;
1329	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1330	dma_addr_t iv_dma = 0;
1331	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1332		      GFP_ATOMIC;
1333	struct talitos_private *priv = dev_get_drvdata(dev);
1334	bool is_sec1 = has_ftr_sec1(priv);
1335	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1336
1337	if (cryptlen + authsize > max_len) {
1338		dev_err(dev, "length exceeds h/w max limit\n");
1339		return ERR_PTR(-EINVAL);
1340	}
1341
1342	if (!dst || dst == src) {
1343		src_len = assoclen + cryptlen + authsize;
1344		src_nents = sg_nents_for_len(src, src_len);
1345		if (src_nents < 0) {
1346			dev_err(dev, "Invalid number of src SG.\n");
1347			return ERR_PTR(-EINVAL);
1348		}
1349		src_nents = (src_nents == 1) ? 0 : src_nents;
1350		dst_nents = dst ? src_nents : 0;
1351		dst_len = 0;
1352	} else { /* dst && dst != src*/
1353		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1354		src_nents = sg_nents_for_len(src, src_len);
1355		if (src_nents < 0) {
1356			dev_err(dev, "Invalid number of src SG.\n");
1357			return ERR_PTR(-EINVAL);
1358		}
1359		src_nents = (src_nents == 1) ? 0 : src_nents;
1360		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1361		dst_nents = sg_nents_for_len(dst, dst_len);
1362		if (dst_nents < 0) {
1363			dev_err(dev, "Invalid number of dst SG.\n");
1364			return ERR_PTR(-EINVAL);
1365		}
1366		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1367	}
1368
1369	/*
1370	 * allocate space for base edesc plus the link tables,
1371	 * allowing for two separate entries for AD and generated ICV (+ 2),
1372	 * and space for two sets of ICVs (stashed and generated)
1373	 */
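	/*
	 * Rough layout of the allocation: the talitos_edesc header,
	 * then dma_len bytes of link tables (SEC2+) or bounce buffer
	 * (SEC1) including ICV space, an optional stashed-ICV slot,
	 * room for a chained second descriptor on SEC1 ahash requests,
	 * and finally the IV copy at alloc_len - ivsize.
	 */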
1374	alloc_len = sizeof(struct talitos_edesc);
1375	if (src_nents || dst_nents || !encrypt) {
1376		if (is_sec1)
1377			dma_len = (src_nents ? src_len : 0) +
1378				  (dst_nents ? dst_len : 0) + authsize;
1379		else
1380			dma_len = (src_nents + dst_nents + 2) *
1381				  sizeof(struct talitos_ptr) + authsize;
1382		alloc_len += dma_len;
1383	} else {
1384		dma_len = 0;
1385	}
1386	alloc_len += icv_stashing ? authsize : 0;
1387
 1388	/* if it's an ahash, add space for a second desc next to the first one */
1389	if (is_sec1 && !dst)
1390		alloc_len += sizeof(struct talitos_desc);
1391	alloc_len += ivsize;
1392
1393	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1394	if (!edesc)
1395		return ERR_PTR(-ENOMEM);
1396	if (ivsize) {
1397		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1398		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1399	}
1400	memset(&edesc->desc, 0, sizeof(edesc->desc));
1401
1402	edesc->src_nents = src_nents;
1403	edesc->dst_nents = dst_nents;
1404	edesc->iv_dma = iv_dma;
1405	edesc->dma_len = dma_len;
1406	if (dma_len)
1407		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1408						     edesc->dma_len,
1409						     DMA_BIDIRECTIONAL);
1410
1411	return edesc;
1412}
1413
1414static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1415					      int icv_stashing, bool encrypt)
1416{
1417	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1418	unsigned int authsize = crypto_aead_authsize(authenc);
1419	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1420	unsigned int ivsize = crypto_aead_ivsize(authenc);
1421	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1422
1423	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1424				   iv, areq->assoclen, cryptlen,
1425				   authsize, ivsize, icv_stashing,
1426				   areq->base.flags, encrypt);
1427}
1428
1429static int aead_encrypt(struct aead_request *req)
1430{
1431	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1432	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1433	struct talitos_edesc *edesc;
1434
1435	/* allocate extended descriptor */
1436	edesc = aead_edesc_alloc(req, req->iv, 0, true);
1437	if (IS_ERR(edesc))
1438		return PTR_ERR(edesc);
1439
1440	/* set encrypt */
1441	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1442
1443	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1444}
1445
1446static int aead_decrypt(struct aead_request *req)
1447{
1448	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1449	unsigned int authsize = crypto_aead_authsize(authenc);
1450	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1451	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1452	struct talitos_edesc *edesc;
1453	void *icvdata;
1454
1455	/* allocate extended descriptor */
1456	edesc = aead_edesc_alloc(req, req->iv, 1, false);
1457	if (IS_ERR(edesc))
1458		return PTR_ERR(edesc);
1459
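	/*
	 * Let the engine verify the ICV itself (MDEU_CICV) when the
	 * descriptor type and scatter/gather shape allow it; otherwise
	 * decrypt first and compare the stashed ICV against the h/w
	 * generated one in software (swauth path below).
	 */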
1460	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1461	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1462	    ((!edesc->src_nents && !edesc->dst_nents) ||
1463	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1464
1465		/* decrypt and check the ICV */
1466		edesc->desc.hdr = ctx->desc_hdr_template |
1467				  DESC_HDR_DIR_INBOUND |
1468				  DESC_HDR_MODE1_MDEU_CICV;
1469
1470		/* reset integrity check result bits */
1471
1472		return ipsec_esp(edesc, req, false,
1473				 ipsec_esp_decrypt_hwauth_done);
1474	}
1475
1476	/* Have to check the ICV with software */
1477	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1478
1479	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1480	icvdata = edesc->buf + edesc->dma_len;
1481
1482	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1483			   req->assoclen + req->cryptlen - authsize);
1484
1485	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1486}
1487
1488static int skcipher_setkey(struct crypto_skcipher *cipher,
1489			     const u8 *key, unsigned int keylen)
1490{
1491	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1492	struct device *dev = ctx->dev;
1493
1494	if (ctx->keylen)
1495		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1496
1497	memcpy(&ctx->key, key, keylen);
1498	ctx->keylen = keylen;
1499
1500	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1501
1502	return 0;
1503}
1504
1505static int skcipher_des_setkey(struct crypto_skcipher *cipher,
1506				 const u8 *key, unsigned int keylen)
1507{
1508	return verify_skcipher_des_key(cipher, key) ?:
1509	       skcipher_setkey(cipher, key, keylen);
1510}
1511
1512static int skcipher_des3_setkey(struct crypto_skcipher *cipher,
1513				  const u8 *key, unsigned int keylen)
1514{
1515	return verify_skcipher_des3_key(cipher, key) ?:
1516	       skcipher_setkey(cipher, key, keylen);
1517}
1518
1519static int skcipher_aes_setkey(struct crypto_skcipher *cipher,
1520				  const u8 *key, unsigned int keylen)
1521{
1522	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1523	    keylen == AES_KEYSIZE_256)
1524		return skcipher_setkey(cipher, key, keylen);
1525
1526	return -EINVAL;
1527}
1528
1529static void common_nonsnoop_unmap(struct device *dev,
1530				  struct talitos_edesc *edesc,
1531				  struct skcipher_request *areq)
1532{
1533	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1534
1535	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0);
1536	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1537
1538	if (edesc->dma_len)
1539		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1540				 DMA_BIDIRECTIONAL);
1541}
1542
1543static void skcipher_done(struct device *dev,
1544			    struct talitos_desc *desc, void *context,
1545			    int err)
1546{
1547	struct skcipher_request *areq = context;
1548	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1549	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1550	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1551	struct talitos_edesc *edesc;
1552
1553	edesc = container_of(desc, struct talitos_edesc, desc);
1554
1555	common_nonsnoop_unmap(dev, edesc, areq);
1556	memcpy(areq->iv, ctx->iv, ivsize);
1557
1558	kfree(edesc);
1559
1560	areq->base.complete(&areq->base, err);
1561}
1562
1563static int common_nonsnoop(struct talitos_edesc *edesc,
1564			   struct skcipher_request *areq,
1565			   void (*callback) (struct device *dev,
1566					     struct talitos_desc *desc,
1567					     void *context, int error))
1568{
1569	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1570	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1571	struct device *dev = ctx->dev;
1572	struct talitos_desc *desc = &edesc->desc;
1573	unsigned int cryptlen = areq->cryptlen;
1574	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1575	int sg_count, ret;
1576	bool sync_needed = false;
1577	struct talitos_private *priv = dev_get_drvdata(dev);
1578	bool is_sec1 = has_ftr_sec1(priv);
1579
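	/*
	 * Descriptor pointer usage for skcipher: ptr[0] unused,
	 * ptr[1] IV in, ptr[2] key, ptr[3] cipher in, ptr[4] cipher out,
	 * ptr[5] IV out, ptr[6] unused.
	 */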
1580	/* first DWORD empty */
1581
1582	/* cipher iv */
1583	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1584
1585	/* cipher key */
1586	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1587
1588	sg_count = edesc->src_nents ?: 1;
1589	if (is_sec1 && sg_count > 1)
1590		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1591				  cryptlen);
1592	else
1593		sg_count = dma_map_sg(dev, areq->src, sg_count,
1594				      (areq->src == areq->dst) ?
1595				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1596	/*
1597	 * cipher in
1598	 */
1599	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1600				  &desc->ptr[3], sg_count, 0, 0);
1601	if (sg_count > 1)
1602		sync_needed = true;
1603
1604	/* cipher out */
1605	if (areq->src != areq->dst) {
1606		sg_count = edesc->dst_nents ? : 1;
1607		if (!is_sec1 || sg_count == 1)
1608			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1609	}
1610
1611	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1612			     sg_count, 0, (edesc->src_nents + 1));
1613	if (ret > 1)
1614		sync_needed = true;
1615
1616	/* iv out */
1617	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1618			       DMA_FROM_DEVICE);
1619
1620	/* last DWORD empty */
1621
1622	if (sync_needed)
1623		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1624					   edesc->dma_len, DMA_BIDIRECTIONAL);
1625
1626	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1627	if (ret != -EINPROGRESS) {
1628		common_nonsnoop_unmap(dev, edesc, areq);
1629		kfree(edesc);
1630	}
1631	return ret;
1632}
1633
1634static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request *
1635						    areq, bool encrypt)
1636{
1637	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1638	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1639	unsigned int ivsize = crypto_skcipher_ivsize(cipher);
1640
1641	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1642				   areq->iv, 0, areq->cryptlen, 0, ivsize, 0,
1643				   areq->base.flags, encrypt);
1644}
1645
1646static int skcipher_encrypt(struct skcipher_request *areq)
1647{
1648	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1649	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1650	struct talitos_edesc *edesc;
1651	unsigned int blocksize =
1652			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1653
1654	if (!areq->cryptlen)
1655		return 0;
1656
1657	if (areq->cryptlen % blocksize)
1658		return -EINVAL;
1659
1660	/* allocate extended descriptor */
1661	edesc = skcipher_edesc_alloc(areq, true);
1662	if (IS_ERR(edesc))
1663		return PTR_ERR(edesc);
1664
1665	/* set encrypt */
1666	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1667
1668	return common_nonsnoop(edesc, areq, skcipher_done);
1669}
1670
1671static int skcipher_decrypt(struct skcipher_request *areq)
1672{
1673	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
1674	struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher);
1675	struct talitos_edesc *edesc;
1676	unsigned int blocksize =
1677			crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher));
1678
1679	if (!areq->cryptlen)
1680		return 0;
1681
1682	if (areq->cryptlen % blocksize)
1683		return -EINVAL;
1684
1685	/* allocate extended descriptor */
1686	edesc = skcipher_edesc_alloc(areq, false);
1687	if (IS_ERR(edesc))
1688		return PTR_ERR(edesc);
1689
1690	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1691
1692	return common_nonsnoop(edesc, areq, skcipher_done);
1693}
1694
1695static void common_nonsnoop_hash_unmap(struct device *dev,
1696				       struct talitos_edesc *edesc,
1697				       struct ahash_request *areq)
1698{
1699	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1700	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1701	struct talitos_private *priv = dev_get_drvdata(dev);
1702	bool is_sec1 = has_ftr_sec1(priv);
1703	struct talitos_desc *desc = &edesc->desc;
1704	struct talitos_desc *desc2 = (struct talitos_desc *)
1705				     (edesc->buf + edesc->dma_len);
1706
1707	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1708	if (desc->next_desc &&
1709	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
1710		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1711	if (req_ctx->last)
1712		memcpy(areq->result, req_ctx->hw_context,
1713		       crypto_ahash_digestsize(tfm));
1714
1715	if (req_ctx->psrc)
1716		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1717
1718	/* When using hashctx-in, must unmap it. */
1719	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1720		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1721					 DMA_TO_DEVICE);
1722	else if (desc->next_desc)
1723		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1724					 DMA_TO_DEVICE);
1725
1726	if (is_sec1 && req_ctx->nbuf)
1727		unmap_single_talitos_ptr(dev, &desc->ptr[3],
1728					 DMA_TO_DEVICE);
1729
1730	if (edesc->dma_len)
1731		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1732				 DMA_BIDIRECTIONAL);
1733
1734	if (edesc->desc.next_desc)
1735		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1736				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1737}
1738
1739static void ahash_done(struct device *dev,
1740		       struct talitos_desc *desc, void *context,
1741		       int err)
1742{
1743	struct ahash_request *areq = context;
1744	struct talitos_edesc *edesc =
1745		 container_of(desc, struct talitos_edesc, desc);
1746	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1747
1748	if (!req_ctx->last && req_ctx->to_hash_later) {
1749		/* Position any partial block for next update/final/finup */
1750		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1751		req_ctx->nbuf = req_ctx->to_hash_later;
1752	}
1753	common_nonsnoop_hash_unmap(dev, edesc, areq);
1754
1755	kfree(edesc);
1756
1757	areq->base.complete(&areq->base, err);
1758}
1759
1760/*
1761 * SEC1 cannot hash a zero-sized message, so we do the padding
1762 * ourselves and submit a single pre-padded block instead
1763 */
1764static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1765			       struct talitos_edesc *edesc,
1766			       struct talitos_ptr *ptr)
1767{
1768	static u8 padded_hash[64] = {
1769		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1770		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1771		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1772		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1773	};
1774
1775	pr_err_once("Bug in SEC1, padding ourselves\n");
1776	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1777	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1778			       (char *)padded_hash, DMA_TO_DEVICE);
1779}
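
/*
 * Note on padded_hash[] above: it is the standard MD5/SHA padding of an
 * empty message for a 64-byte block - a 0x80 end-of-message marker
 * followed by zeros, including the 64-bit zero length field. Clearing
 * DESC_HDR_MODE0_MDEU_PAD tells the MDEU not to pad this block again.
 */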
1780
1781static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1782				struct ahash_request *areq, unsigned int length,
1783				void (*callback) (struct device *dev,
1784						  struct talitos_desc *desc,
1785						  void *context, int error))
1786{
1787	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1788	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1789	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1790	struct device *dev = ctx->dev;
1791	struct talitos_desc *desc = &edesc->desc;
1792	int ret;
1793	bool sync_needed = false;
1794	struct talitos_private *priv = dev_get_drvdata(dev);
1795	bool is_sec1 = has_ftr_sec1(priv);
1796	int sg_count;
1797
1798	/* first DWORD empty */
1799
1800	/* hash context in */
1801	if (!req_ctx->first || req_ctx->swinit) {
1802		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1803					      req_ctx->hw_context_size,
1804					      req_ctx->hw_context,
1805					      DMA_TO_DEVICE);
1806		req_ctx->swinit = 0;
1807	}
1808	/* Indicate next op is not the first. */
1809	req_ctx->first = 0;
1810
1811	/* HMAC key */
1812	if (ctx->keylen)
1813		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1814			       is_sec1);
1815
1816	if (is_sec1 && req_ctx->nbuf)
1817		length -= req_ctx->nbuf;
1818
1819	sg_count = edesc->src_nents ?: 1;
1820	if (is_sec1 && sg_count > 1)
1821		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1822	else if (length)
1823		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1824				      DMA_TO_DEVICE);
1825	/*
1826	 * data in
1827	 */
1828	if (is_sec1 && req_ctx->nbuf) {
1829		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1830				       req_ctx->buf[req_ctx->buf_idx],
1831				       DMA_TO_DEVICE);
1832	} else {
1833		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1834					  &desc->ptr[3], sg_count, 0, 0);
1835		if (sg_count > 1)
1836			sync_needed = true;
1837	}
1838
1839	/* fifth DWORD empty */
1840
1841	/* hash/HMAC out -or- hash context out */
1842	if (req_ctx->last)
1843		map_single_talitos_ptr(dev, &desc->ptr[5],
1844				       crypto_ahash_digestsize(tfm),
1845				       req_ctx->hw_context, DMA_FROM_DEVICE);
1846	else
1847		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1848					      req_ctx->hw_context_size,
1849					      req_ctx->hw_context,
1850					      DMA_FROM_DEVICE);
1851
1852	/* last DWORD empty */
1853
1854	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1855		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1856
1857	if (is_sec1 && req_ctx->nbuf && length) {
1858		struct talitos_desc *desc2 = (struct talitos_desc *)
1859					     (edesc->buf + edesc->dma_len);
1860		dma_addr_t next_desc;
1861
1862		memset(desc2, 0, sizeof(*desc2));
1863		desc2->hdr = desc->hdr;
1864		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1865		desc2->hdr1 = desc2->hdr;
1866		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1867		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1868		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1869
1870		if (desc->ptr[1].ptr)
1871			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1872					 is_sec1);
1873		else
1874			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1875						      req_ctx->hw_context_size,
1876						      req_ctx->hw_context,
1877						      DMA_TO_DEVICE);
1878		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1879		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1880					  &desc2->ptr[3], sg_count, 0, 0);
1881		if (sg_count > 1)
1882			sync_needed = true;
1883		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1884		if (req_ctx->last)
1885			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1886						      req_ctx->hw_context_size,
1887						      req_ctx->hw_context,
1888						      DMA_FROM_DEVICE);
1889
1890		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1891					   DMA_BIDIRECTIONAL);
1892		desc->next_desc = cpu_to_be32(next_desc);
1893	}
1894
1895	if (sync_needed)
1896		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1897					   edesc->dma_len, DMA_BIDIRECTIONAL);
1898
1899	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1900	if (ret != -EINPROGRESS) {
1901		common_nonsnoop_hash_unmap(dev, edesc, areq);
1902		kfree(edesc);
1903	}
1904	return ret;
1905}
1906
1907static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1908					       unsigned int nbytes)
1909{
1910	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1911	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1912	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1913	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1914	bool is_sec1 = has_ftr_sec1(priv);
1915
1916	if (is_sec1)
1917		nbytes -= req_ctx->nbuf;
1918
1919	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1920				   nbytes, 0, 0, 0, areq->base.flags, false);
1921}
1922
1923static int ahash_init(struct ahash_request *areq)
1924{
1925	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1926	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1927	struct device *dev = ctx->dev;
1928	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1929	unsigned int size;
1930	dma_addr_t dma;
1931
1932	/* Initialize the context */
1933	req_ctx->buf_idx = 0;
1934	req_ctx->nbuf = 0;
1935	req_ctx->first = 1; /* first indicates h/w must init its context */
1936	req_ctx->swinit = 0; /* assume h/w init of context */
1937	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1938			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1939			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1940	req_ctx->hw_context_size = size;
1941
1942	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1943			     DMA_TO_DEVICE);
1944	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1945
1946	return 0;
1947}
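
/*
 * The dma_map_single()/dma_unmap_single() pair above transfers nothing
 * by itself; mapping hw_context for DMA_TO_DEVICE flushes the CPU cache
 * lines covering it, so the later _nosync mappings used while hashing
 * observe consistent memory.
 */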
1948
1949/*
1950 * On h/w without explicit sha224 support, we initialize the h/w context
1951 * manually with sha224 constants, and tell it to run sha256.
1952 */
1953static int ahash_init_sha224_swinit(struct ahash_request *areq)
1954{
1955	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1956
1957	req_ctx->hw_context[0] = SHA224_H0;
1958	req_ctx->hw_context[1] = SHA224_H1;
1959	req_ctx->hw_context[2] = SHA224_H2;
1960	req_ctx->hw_context[3] = SHA224_H3;
1961	req_ctx->hw_context[4] = SHA224_H4;
1962	req_ctx->hw_context[5] = SHA224_H5;
1963	req_ctx->hw_context[6] = SHA224_H6;
1964	req_ctx->hw_context[7] = SHA224_H7;
1965
1966	/* init 64-bit count */
1967	req_ctx->hw_context[8] = 0;
1968	req_ctx->hw_context[9] = 0;
1969
1970	ahash_init(areq);
1971	req_ctx->swinit = 1; /* prevent h/w initializing context with sha256 values */
1972
1973	return 0;
1974}
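
/*
 * SHA224_H0..SHA224_H7 are the standard SHA-224 initial hash values
 * (FIPS 180-2). Since SHA-224 is SHA-256 with a different IV and a
 * truncated digest, seeding the context here and running the MDEU in
 * sha256 mode yields correct SHA-224 results.
 */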
1975
1976static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1977{
1978	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1979	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1980	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1981	struct talitos_edesc *edesc;
1982	unsigned int blocksize =
1983			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1984	unsigned int nbytes_to_hash;
1985	unsigned int to_hash_later;
1986	unsigned int nsg;
1987	int nents;
1988	struct device *dev = ctx->dev;
1989	struct talitos_private *priv = dev_get_drvdata(dev);
1990	bool is_sec1 = has_ftr_sec1(priv);
1991	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1992
1993	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1994		/* Buffer up to one whole block */
1995		nents = sg_nents_for_len(areq->src, nbytes);
1996		if (nents < 0) {
1997			dev_err(ctx->dev, "Invalid number of src SG.\n");
1998			return nents;
1999		}
2000		sg_copy_to_buffer(areq->src, nents,
2001				  ctx_buf + req_ctx->nbuf, nbytes);
2002		req_ctx->nbuf += nbytes;
2003		return 0;
2004	}
2005
2006	/* At least (blocksize + 1) bytes are available to hash */
2007	nbytes_to_hash = nbytes + req_ctx->nbuf;
2008	to_hash_later = nbytes_to_hash & (blocksize - 1);
2009
2010	if (req_ctx->last)
2011		to_hash_later = 0;
2012	else if (to_hash_later)
2013		/* There is a partial block. Hash the full block(s) now */
2014		nbytes_to_hash -= to_hash_later;
2015	else {
2016		/* Keep one block buffered */
2017		nbytes_to_hash -= blocksize;
2018		to_hash_later = blocksize;
2019	}
2020
2021	/* Chain in any previously buffered data */
2022	if (!is_sec1 && req_ctx->nbuf) {
2023		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2024		sg_init_table(req_ctx->bufsl, nsg);
2025		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2026		if (nsg > 1)
2027			sg_chain(req_ctx->bufsl, 2, areq->src);
2028		req_ctx->psrc = req_ctx->bufsl;
2029	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2030		int offset;
2031
2032		if (nbytes_to_hash > blocksize)
2033			offset = blocksize - req_ctx->nbuf;
2034		else
2035			offset = nbytes_to_hash - req_ctx->nbuf;
2036		nents = sg_nents_for_len(areq->src, offset);
2037		if (nents < 0) {
2038			dev_err(ctx->dev, "Invalid number of src SG.\n");
2039			return nents;
2040		}
2041		sg_copy_to_buffer(areq->src, nents,
2042				  ctx_buf + req_ctx->nbuf, offset);
2043		req_ctx->nbuf += offset;
2044		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2045						 offset);
2046	} else
2047		req_ctx->psrc = areq->src;
2048
2049	if (to_hash_later) {
2050		nents = sg_nents_for_len(areq->src, nbytes);
2051		if (nents < 0) {
2052			dev_err(ctx->dev, "Invalid number of src SG.\n");
2053			return nents;
2054		}
2055		sg_pcopy_to_buffer(areq->src, nents,
2056				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2057				   to_hash_later,
2058				   nbytes - to_hash_later);
2059	}
2060	req_ctx->to_hash_later = to_hash_later;
2061
2062	/* Allocate extended descriptor */
2063	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2064	if (IS_ERR(edesc))
2065		return PTR_ERR(edesc);
2066
2067	edesc->desc.hdr = ctx->desc_hdr_template;
2068
2069	/* On last one, request SEC to pad; otherwise continue */
2070	if (req_ctx->last)
2071		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2072	else
2073		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2074
2075	/* request SEC to INIT hash. */
2076	if (req_ctx->first && !req_ctx->swinit)
2077		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2078
2079	/* When the tfm context has a keylen, it's an HMAC.
2080	 * A first or last (i.e. not middle) descriptor must request HMAC.
2081	 */
2082	if (ctx->keylen && (req_ctx->first || req_ctx->last))
2083		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2084
2085	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2086}
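
/*
 * Worked example for the buffering above, assuming blocksize = 64: with
 * nbuf = 10 bytes already buffered, an update of 100 bytes yields
 * nbytes_to_hash = 110 and to_hash_later = 110 & 63 = 46, so one full
 * 64-byte block is hashed now and 46 bytes are parked in the spare
 * buffer for the next update/final/finup.
 */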
2087
2088static int ahash_update(struct ahash_request *areq)
2089{
2090	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2091
2092	req_ctx->last = 0;
2093
2094	return ahash_process_req(areq, areq->nbytes);
2095}
2096
2097static int ahash_final(struct ahash_request *areq)
2098{
2099	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2100
2101	req_ctx->last = 1;
2102
2103	return ahash_process_req(areq, 0);
2104}
2105
2106static int ahash_finup(struct ahash_request *areq)
2107{
2108	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2109
2110	req_ctx->last = 1;
2111
2112	return ahash_process_req(areq, areq->nbytes);
2113}
2114
2115static int ahash_digest(struct ahash_request *areq)
2116{
2117	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2118	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2119
2120	ahash->init(areq);
2121	req_ctx->last = 1;
2122
2123	return ahash_process_req(areq, areq->nbytes);
2124}
2125
2126static int ahash_export(struct ahash_request *areq, void *out)
2127{
2128	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2129	struct talitos_export_state *export = out;
2130	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2131	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2132	struct device *dev = ctx->dev;
2133	dma_addr_t dma;
2134
2135	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2136			     DMA_FROM_DEVICE);
2137	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2138
2139	memcpy(export->hw_context, req_ctx->hw_context,
2140	       req_ctx->hw_context_size);
2141	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2142	export->swinit = req_ctx->swinit;
2143	export->first = req_ctx->first;
2144	export->last = req_ctx->last;
2145	export->to_hash_later = req_ctx->to_hash_later;
2146	export->nbuf = req_ctx->nbuf;
2147
2148	return 0;
2149}
2150
2151static int ahash_import(struct ahash_request *areq, const void *in)
2152{
2153	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2154	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2155	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2156	struct device *dev = ctx->dev;
2157	const struct talitos_export_state *export = in;
2158	unsigned int size;
2159	dma_addr_t dma;
2160
2161	memset(req_ctx, 0, sizeof(*req_ctx));
2162	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2163			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2164			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2165	req_ctx->hw_context_size = size;
2166	memcpy(req_ctx->hw_context, export->hw_context, size);
2167	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2168	req_ctx->swinit = export->swinit;
2169	req_ctx->first = export->first;
2170	req_ctx->last = export->last;
2171	req_ctx->to_hash_later = export->to_hash_later;
2172	req_ctx->nbuf = export->nbuf;
2173
2174	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2175			     DMA_TO_DEVICE);
2176	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2177
2178	return 0;
2179}
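
/*
 * ahash_export()/ahash_import() above serialize the whole request state
 * (h/w context, partial-block buffer and flags) into the statesize-sized
 * struct talitos_export_state, letting an in-progress hash be saved and
 * resumed on another request.
 */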
2180
2181static int keyhash(struct crypto_ahash *tfm, const u8 *key,
2182		   unsigned int keylen, u8 *hash)
2183{
2184	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2185
2186	struct scatterlist sg[1];
2187	struct ahash_request *req;
2188	struct crypto_wait wait;
2189	int ret;
2190
2191	crypto_init_wait(&wait);
2192
2193	req = ahash_request_alloc(tfm, GFP_KERNEL);
2194	if (!req)
2195		return -ENOMEM;
2196
2197	/* Keep tfm keylen == 0 during hash of the long key */
2198	ctx->keylen = 0;
2199	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2200				   crypto_req_done, &wait);
2201
2202	sg_init_one(&sg[0], key, keylen);
2203
2204	ahash_request_set_crypt(req, sg, hash, keylen);
2205	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2206
2207	ahash_request_free(req);
2208
2209	return ret;
2210}
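
/*
 * keyhash() implements the RFC 2104 rule that an HMAC key longer than
 * the block size is replaced by its digest; ahash_setkey() below invokes
 * it only when keylen > blocksize.
 */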
2211
2212static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2213			unsigned int keylen)
2214{
2215	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2216	struct device *dev = ctx->dev;
2217	unsigned int blocksize =
2218			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2219	unsigned int digestsize = crypto_ahash_digestsize(tfm);
2220	unsigned int keysize = keylen;
2221	u8 hash[SHA512_DIGEST_SIZE];
2222	int ret;
2223
2224	if (keylen <= blocksize) {
2225		memcpy(ctx->key, key, keysize);
2226	} else {
2227		/* Must get the hash of the long key */
2228		ret = keyhash(tfm, key, keylen, hash);
2229
2230		if (ret)
2231			return -EINVAL;
2232
2233		keysize = digestsize;
2234		memcpy(ctx->key, hash, digestsize);
2235	}
2236
2237	if (ctx->keylen)
2238		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2239
2240	ctx->keylen = keysize;
2241	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2242
2243	return 0;
2244}
2245
2246
2247struct talitos_alg_template {
2248	u32 type;
2249	u32 priority;
2250	union {
2251		struct skcipher_alg skcipher;
2252		struct ahash_alg hash;
2253		struct aead_alg aead;
2254	} alg;
2255	__be32 desc_hdr_template;
2256};
2257
2258static struct talitos_alg_template driver_algs[] = {
2259	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2260	{	.type = CRYPTO_ALG_TYPE_AEAD,
2261		.alg.aead = {
2262			.base = {
2263				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2264				.cra_driver_name = "authenc-hmac-sha1-"
2265						   "cbc-aes-talitos",
2266				.cra_blocksize = AES_BLOCK_SIZE,
2267				.cra_flags = CRYPTO_ALG_ASYNC |
2268					     CRYPTO_ALG_ALLOCATES_MEMORY,
2269			},
2270			.ivsize = AES_BLOCK_SIZE,
2271			.maxauthsize = SHA1_DIGEST_SIZE,
2272		},
2273		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2274			             DESC_HDR_SEL0_AESU |
2275		                     DESC_HDR_MODE0_AESU_CBC |
2276		                     DESC_HDR_SEL1_MDEUA |
2277		                     DESC_HDR_MODE1_MDEU_INIT |
2278		                     DESC_HDR_MODE1_MDEU_PAD |
2279		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2280	},
2281	{	.type = CRYPTO_ALG_TYPE_AEAD,
2282		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2283		.alg.aead = {
2284			.base = {
2285				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2286				.cra_driver_name = "authenc-hmac-sha1-"
2287						   "cbc-aes-talitos-hsna",
2288				.cra_blocksize = AES_BLOCK_SIZE,
2289				.cra_flags = CRYPTO_ALG_ASYNC |
2290					     CRYPTO_ALG_ALLOCATES_MEMORY,
2291			},
2292			.ivsize = AES_BLOCK_SIZE,
2293			.maxauthsize = SHA1_DIGEST_SIZE,
2294		},
2295		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2296				     DESC_HDR_SEL0_AESU |
2297				     DESC_HDR_MODE0_AESU_CBC |
2298				     DESC_HDR_SEL1_MDEUA |
2299				     DESC_HDR_MODE1_MDEU_INIT |
2300				     DESC_HDR_MODE1_MDEU_PAD |
2301				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2302	},
2303	{	.type = CRYPTO_ALG_TYPE_AEAD,
2304		.alg.aead = {
2305			.base = {
2306				.cra_name = "authenc(hmac(sha1),"
2307					    "cbc(des3_ede))",
2308				.cra_driver_name = "authenc-hmac-sha1-"
2309						   "cbc-3des-talitos",
2310				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2311				.cra_flags = CRYPTO_ALG_ASYNC |
2312					     CRYPTO_ALG_ALLOCATES_MEMORY,
2313			},
2314			.ivsize = DES3_EDE_BLOCK_SIZE,
2315			.maxauthsize = SHA1_DIGEST_SIZE,
2316			.setkey = aead_des3_setkey,
2317		},
2318		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2319			             DESC_HDR_SEL0_DEU |
2320		                     DESC_HDR_MODE0_DEU_CBC |
2321		                     DESC_HDR_MODE0_DEU_3DES |
2322		                     DESC_HDR_SEL1_MDEUA |
2323		                     DESC_HDR_MODE1_MDEU_INIT |
2324		                     DESC_HDR_MODE1_MDEU_PAD |
2325		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2326	},
2327	{	.type = CRYPTO_ALG_TYPE_AEAD,
2328		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2329		.alg.aead = {
2330			.base = {
2331				.cra_name = "authenc(hmac(sha1),"
2332					    "cbc(des3_ede))",
2333				.cra_driver_name = "authenc-hmac-sha1-"
2334						   "cbc-3des-talitos-hsna",
2335				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2336				.cra_flags = CRYPTO_ALG_ASYNC |
2337					     CRYPTO_ALG_ALLOCATES_MEMORY,
2338			},
2339			.ivsize = DES3_EDE_BLOCK_SIZE,
2340			.maxauthsize = SHA1_DIGEST_SIZE,
2341			.setkey = aead_des3_setkey,
2342		},
2343		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2344				     DESC_HDR_SEL0_DEU |
2345				     DESC_HDR_MODE0_DEU_CBC |
2346				     DESC_HDR_MODE0_DEU_3DES |
2347				     DESC_HDR_SEL1_MDEUA |
2348				     DESC_HDR_MODE1_MDEU_INIT |
2349				     DESC_HDR_MODE1_MDEU_PAD |
2350				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2351	},
2352	{	.type = CRYPTO_ALG_TYPE_AEAD,
2353		.alg.aead = {
2354			.base = {
2355				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2356				.cra_driver_name = "authenc-hmac-sha224-"
2357						   "cbc-aes-talitos",
2358				.cra_blocksize = AES_BLOCK_SIZE,
2359				.cra_flags = CRYPTO_ALG_ASYNC |
2360					     CRYPTO_ALG_ALLOCATES_MEMORY,
2361			},
2362			.ivsize = AES_BLOCK_SIZE,
2363			.maxauthsize = SHA224_DIGEST_SIZE,
2364		},
2365		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2366				     DESC_HDR_SEL0_AESU |
2367				     DESC_HDR_MODE0_AESU_CBC |
2368				     DESC_HDR_SEL1_MDEUA |
2369				     DESC_HDR_MODE1_MDEU_INIT |
2370				     DESC_HDR_MODE1_MDEU_PAD |
2371				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2372	},
2373	{	.type = CRYPTO_ALG_TYPE_AEAD,
2374		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2375		.alg.aead = {
2376			.base = {
2377				.cra_name = "authenc(hmac(sha224),cbc(aes))",
2378				.cra_driver_name = "authenc-hmac-sha224-"
2379						   "cbc-aes-talitos-hsna",
2380				.cra_blocksize = AES_BLOCK_SIZE,
2381				.cra_flags = CRYPTO_ALG_ASYNC |
2382					     CRYPTO_ALG_ALLOCATES_MEMORY,
2383			},
2384			.ivsize = AES_BLOCK_SIZE,
2385			.maxauthsize = SHA224_DIGEST_SIZE,
2386		},
2387		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2388				     DESC_HDR_SEL0_AESU |
2389				     DESC_HDR_MODE0_AESU_CBC |
2390				     DESC_HDR_SEL1_MDEUA |
2391				     DESC_HDR_MODE1_MDEU_INIT |
2392				     DESC_HDR_MODE1_MDEU_PAD |
2393				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2394	},
2395	{	.type = CRYPTO_ALG_TYPE_AEAD,
2396		.alg.aead = {
2397			.base = {
2398				.cra_name = "authenc(hmac(sha224),"
2399					    "cbc(des3_ede))",
2400				.cra_driver_name = "authenc-hmac-sha224-"
2401						   "cbc-3des-talitos",
2402				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2403				.cra_flags = CRYPTO_ALG_ASYNC |
2404					     CRYPTO_ALG_ALLOCATES_MEMORY,
2405			},
2406			.ivsize = DES3_EDE_BLOCK_SIZE,
2407			.maxauthsize = SHA224_DIGEST_SIZE,
2408			.setkey = aead_des3_setkey,
2409		},
2410		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2411			             DESC_HDR_SEL0_DEU |
2412		                     DESC_HDR_MODE0_DEU_CBC |
2413		                     DESC_HDR_MODE0_DEU_3DES |
2414		                     DESC_HDR_SEL1_MDEUA |
2415		                     DESC_HDR_MODE1_MDEU_INIT |
2416		                     DESC_HDR_MODE1_MDEU_PAD |
2417		                     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2418	},
2419	{	.type = CRYPTO_ALG_TYPE_AEAD,
2420		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2421		.alg.aead = {
2422			.base = {
2423				.cra_name = "authenc(hmac(sha224),"
2424					    "cbc(des3_ede))",
2425				.cra_driver_name = "authenc-hmac-sha224-"
2426						   "cbc-3des-talitos-hsna",
2427				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2428				.cra_flags = CRYPTO_ALG_ASYNC |
2429					     CRYPTO_ALG_ALLOCATES_MEMORY,
2430			},
2431			.ivsize = DES3_EDE_BLOCK_SIZE,
2432			.maxauthsize = SHA224_DIGEST_SIZE,
2433			.setkey = aead_des3_setkey,
2434		},
2435		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2436				     DESC_HDR_SEL0_DEU |
2437				     DESC_HDR_MODE0_DEU_CBC |
2438				     DESC_HDR_MODE0_DEU_3DES |
2439				     DESC_HDR_SEL1_MDEUA |
2440				     DESC_HDR_MODE1_MDEU_INIT |
2441				     DESC_HDR_MODE1_MDEU_PAD |
2442				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2443	},
2444	{	.type = CRYPTO_ALG_TYPE_AEAD,
2445		.alg.aead = {
2446			.base = {
2447				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2448				.cra_driver_name = "authenc-hmac-sha256-"
2449						   "cbc-aes-talitos",
2450				.cra_blocksize = AES_BLOCK_SIZE,
2451				.cra_flags = CRYPTO_ALG_ASYNC |
2452					     CRYPTO_ALG_ALLOCATES_MEMORY,
2453			},
2454			.ivsize = AES_BLOCK_SIZE,
2455			.maxauthsize = SHA256_DIGEST_SIZE,
2456		},
2457		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2458			             DESC_HDR_SEL0_AESU |
2459		                     DESC_HDR_MODE0_AESU_CBC |
2460		                     DESC_HDR_SEL1_MDEUA |
2461		                     DESC_HDR_MODE1_MDEU_INIT |
2462		                     DESC_HDR_MODE1_MDEU_PAD |
2463		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2464	},
2465	{	.type = CRYPTO_ALG_TYPE_AEAD,
2466		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2467		.alg.aead = {
2468			.base = {
2469				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2470				.cra_driver_name = "authenc-hmac-sha256-"
2471						   "cbc-aes-talitos-hsna",
2472				.cra_blocksize = AES_BLOCK_SIZE,
2473				.cra_flags = CRYPTO_ALG_ASYNC |
2474					     CRYPTO_ALG_ALLOCATES_MEMORY,
2475			},
2476			.ivsize = AES_BLOCK_SIZE,
2477			.maxauthsize = SHA256_DIGEST_SIZE,
2478		},
2479		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2480				     DESC_HDR_SEL0_AESU |
2481				     DESC_HDR_MODE0_AESU_CBC |
2482				     DESC_HDR_SEL1_MDEUA |
2483				     DESC_HDR_MODE1_MDEU_INIT |
2484				     DESC_HDR_MODE1_MDEU_PAD |
2485				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2486	},
2487	{	.type = CRYPTO_ALG_TYPE_AEAD,
2488		.alg.aead = {
2489			.base = {
2490				.cra_name = "authenc(hmac(sha256),"
2491					    "cbc(des3_ede))",
2492				.cra_driver_name = "authenc-hmac-sha256-"
2493						   "cbc-3des-talitos",
2494				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2495				.cra_flags = CRYPTO_ALG_ASYNC |
2496					     CRYPTO_ALG_ALLOCATES_MEMORY,
2497			},
2498			.ivsize = DES3_EDE_BLOCK_SIZE,
2499			.maxauthsize = SHA256_DIGEST_SIZE,
2500			.setkey = aead_des3_setkey,
2501		},
2502		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2503			             DESC_HDR_SEL0_DEU |
2504		                     DESC_HDR_MODE0_DEU_CBC |
2505		                     DESC_HDR_MODE0_DEU_3DES |
2506		                     DESC_HDR_SEL1_MDEUA |
2507		                     DESC_HDR_MODE1_MDEU_INIT |
2508		                     DESC_HDR_MODE1_MDEU_PAD |
2509		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2510	},
2511	{	.type = CRYPTO_ALG_TYPE_AEAD,
2512		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2513		.alg.aead = {
2514			.base = {
2515				.cra_name = "authenc(hmac(sha256),"
2516					    "cbc(des3_ede))",
2517				.cra_driver_name = "authenc-hmac-sha256-"
2518						   "cbc-3des-talitos-hsna",
2519				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2520				.cra_flags = CRYPTO_ALG_ASYNC |
2521					     CRYPTO_ALG_ALLOCATES_MEMORY,
2522			},
2523			.ivsize = DES3_EDE_BLOCK_SIZE,
2524			.maxauthsize = SHA256_DIGEST_SIZE,
2525			.setkey = aead_des3_setkey,
2526		},
2527		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2528				     DESC_HDR_SEL0_DEU |
2529				     DESC_HDR_MODE0_DEU_CBC |
2530				     DESC_HDR_MODE0_DEU_3DES |
2531				     DESC_HDR_SEL1_MDEUA |
2532				     DESC_HDR_MODE1_MDEU_INIT |
2533				     DESC_HDR_MODE1_MDEU_PAD |
2534				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2535	},
2536	{	.type = CRYPTO_ALG_TYPE_AEAD,
2537		.alg.aead = {
2538			.base = {
2539				.cra_name = "authenc(hmac(sha384),cbc(aes))",
2540				.cra_driver_name = "authenc-hmac-sha384-"
2541						   "cbc-aes-talitos",
2542				.cra_blocksize = AES_BLOCK_SIZE,
2543				.cra_flags = CRYPTO_ALG_ASYNC |
2544					     CRYPTO_ALG_ALLOCATES_MEMORY,
2545			},
2546			.ivsize = AES_BLOCK_SIZE,
2547			.maxauthsize = SHA384_DIGEST_SIZE,
2548		},
2549		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2550			             DESC_HDR_SEL0_AESU |
2551		                     DESC_HDR_MODE0_AESU_CBC |
2552		                     DESC_HDR_SEL1_MDEUB |
2553		                     DESC_HDR_MODE1_MDEU_INIT |
2554		                     DESC_HDR_MODE1_MDEU_PAD |
2555		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2556	},
2557	{	.type = CRYPTO_ALG_TYPE_AEAD,
2558		.alg.aead = {
2559			.base = {
2560				.cra_name = "authenc(hmac(sha384),"
2561					    "cbc(des3_ede))",
2562				.cra_driver_name = "authenc-hmac-sha384-"
2563						   "cbc-3des-talitos",
2564				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2565				.cra_flags = CRYPTO_ALG_ASYNC |
2566					     CRYPTO_ALG_ALLOCATES_MEMORY,
2567			},
2568			.ivsize = DES3_EDE_BLOCK_SIZE,
2569			.maxauthsize = SHA384_DIGEST_SIZE,
2570			.setkey = aead_des3_setkey,
2571		},
2572		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2573			             DESC_HDR_SEL0_DEU |
2574		                     DESC_HDR_MODE0_DEU_CBC |
2575		                     DESC_HDR_MODE0_DEU_3DES |
2576		                     DESC_HDR_SEL1_MDEUB |
2577		                     DESC_HDR_MODE1_MDEU_INIT |
2578		                     DESC_HDR_MODE1_MDEU_PAD |
2579		                     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2580	},
2581	{	.type = CRYPTO_ALG_TYPE_AEAD,
2582		.alg.aead = {
2583			.base = {
2584				.cra_name = "authenc(hmac(sha512),cbc(aes))",
2585				.cra_driver_name = "authenc-hmac-sha512-"
2586						   "cbc-aes-talitos",
2587				.cra_blocksize = AES_BLOCK_SIZE,
2588				.cra_flags = CRYPTO_ALG_ASYNC |
2589					     CRYPTO_ALG_ALLOCATES_MEMORY,
2590			},
2591			.ivsize = AES_BLOCK_SIZE,
2592			.maxauthsize = SHA512_DIGEST_SIZE,
2593		},
2594		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2595			             DESC_HDR_SEL0_AESU |
2596		                     DESC_HDR_MODE0_AESU_CBC |
2597		                     DESC_HDR_SEL1_MDEUB |
2598		                     DESC_HDR_MODE1_MDEU_INIT |
2599		                     DESC_HDR_MODE1_MDEU_PAD |
2600		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2601	},
2602	{	.type = CRYPTO_ALG_TYPE_AEAD,
2603		.alg.aead = {
2604			.base = {
2605				.cra_name = "authenc(hmac(sha512),"
2606					    "cbc(des3_ede))",
2607				.cra_driver_name = "authenc-hmac-sha512-"
2608						   "cbc-3des-talitos",
2609				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2610				.cra_flags = CRYPTO_ALG_ASYNC |
2611					     CRYPTO_ALG_ALLOCATES_MEMORY,
2612			},
2613			.ivsize = DES3_EDE_BLOCK_SIZE,
2614			.maxauthsize = SHA512_DIGEST_SIZE,
2615			.setkey = aead_des3_setkey,
2616		},
2617		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2618			             DESC_HDR_SEL0_DEU |
2619		                     DESC_HDR_MODE0_DEU_CBC |
2620		                     DESC_HDR_MODE0_DEU_3DES |
2621		                     DESC_HDR_SEL1_MDEUB |
2622		                     DESC_HDR_MODE1_MDEU_INIT |
2623		                     DESC_HDR_MODE1_MDEU_PAD |
2624		                     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2625	},
2626	{	.type = CRYPTO_ALG_TYPE_AEAD,
2627		.alg.aead = {
2628			.base = {
2629				.cra_name = "authenc(hmac(md5),cbc(aes))",
2630				.cra_driver_name = "authenc-hmac-md5-"
2631						   "cbc-aes-talitos",
2632				.cra_blocksize = AES_BLOCK_SIZE,
2633				.cra_flags = CRYPTO_ALG_ASYNC |
2634					     CRYPTO_ALG_ALLOCATES_MEMORY,
2635			},
2636			.ivsize = AES_BLOCK_SIZE,
2637			.maxauthsize = MD5_DIGEST_SIZE,
2638		},
2639		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2640			             DESC_HDR_SEL0_AESU |
2641		                     DESC_HDR_MODE0_AESU_CBC |
2642		                     DESC_HDR_SEL1_MDEUA |
2643		                     DESC_HDR_MODE1_MDEU_INIT |
2644		                     DESC_HDR_MODE1_MDEU_PAD |
2645		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2646	},
2647	{	.type = CRYPTO_ALG_TYPE_AEAD,
2648		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2649		.alg.aead = {
2650			.base = {
2651				.cra_name = "authenc(hmac(md5),cbc(aes))",
2652				.cra_driver_name = "authenc-hmac-md5-"
2653						   "cbc-aes-talitos-hsna",
2654				.cra_blocksize = AES_BLOCK_SIZE,
2655				.cra_flags = CRYPTO_ALG_ASYNC |
2656					     CRYPTO_ALG_ALLOCATES_MEMORY,
2657			},
2658			.ivsize = AES_BLOCK_SIZE,
2659			.maxauthsize = MD5_DIGEST_SIZE,
2660		},
2661		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2662				     DESC_HDR_SEL0_AESU |
2663				     DESC_HDR_MODE0_AESU_CBC |
2664				     DESC_HDR_SEL1_MDEUA |
2665				     DESC_HDR_MODE1_MDEU_INIT |
2666				     DESC_HDR_MODE1_MDEU_PAD |
2667				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2668	},
2669	{	.type = CRYPTO_ALG_TYPE_AEAD,
2670		.alg.aead = {
2671			.base = {
2672				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2673				.cra_driver_name = "authenc-hmac-md5-"
2674						   "cbc-3des-talitos",
2675				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2676				.cra_flags = CRYPTO_ALG_ASYNC |
2677					     CRYPTO_ALG_ALLOCATES_MEMORY,
2678			},
2679			.ivsize = DES3_EDE_BLOCK_SIZE,
2680			.maxauthsize = MD5_DIGEST_SIZE,
2681			.setkey = aead_des3_setkey,
2682		},
2683		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2684			             DESC_HDR_SEL0_DEU |
2685		                     DESC_HDR_MODE0_DEU_CBC |
2686		                     DESC_HDR_MODE0_DEU_3DES |
2687		                     DESC_HDR_SEL1_MDEUA |
2688		                     DESC_HDR_MODE1_MDEU_INIT |
2689		                     DESC_HDR_MODE1_MDEU_PAD |
2690		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2691	},
2692	{	.type = CRYPTO_ALG_TYPE_AEAD,
2693		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2694		.alg.aead = {
2695			.base = {
2696				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2697				.cra_driver_name = "authenc-hmac-md5-"
2698						   "cbc-3des-talitos-hsna",
2699				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2700				.cra_flags = CRYPTO_ALG_ASYNC |
2701					     CRYPTO_ALG_ALLOCATES_MEMORY,
2702			},
2703			.ivsize = DES3_EDE_BLOCK_SIZE,
2704			.maxauthsize = MD5_DIGEST_SIZE,
2705			.setkey = aead_des3_setkey,
2706		},
2707		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2708				     DESC_HDR_SEL0_DEU |
2709				     DESC_HDR_MODE0_DEU_CBC |
2710				     DESC_HDR_MODE0_DEU_3DES |
2711				     DESC_HDR_SEL1_MDEUA |
2712				     DESC_HDR_MODE1_MDEU_INIT |
2713				     DESC_HDR_MODE1_MDEU_PAD |
2714				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2715	},
2716	/* SKCIPHER algorithms. */
2717	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2718		.alg.skcipher = {
2719			.base.cra_name = "ecb(aes)",
2720			.base.cra_driver_name = "ecb-aes-talitos",
2721			.base.cra_blocksize = AES_BLOCK_SIZE,
2722			.base.cra_flags = CRYPTO_ALG_ASYNC |
2723					  CRYPTO_ALG_ALLOCATES_MEMORY,
2724			.min_keysize = AES_MIN_KEY_SIZE,
2725			.max_keysize = AES_MAX_KEY_SIZE,
2726			.setkey = skcipher_aes_setkey,
2727		},
2728		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2729				     DESC_HDR_SEL0_AESU,
2730	},
2731	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2732		.alg.skcipher = {
2733			.base.cra_name = "cbc(aes)",
2734			.base.cra_driver_name = "cbc-aes-talitos",
2735			.base.cra_blocksize = AES_BLOCK_SIZE,
2736			.base.cra_flags = CRYPTO_ALG_ASYNC |
2737					  CRYPTO_ALG_ALLOCATES_MEMORY,
2738			.min_keysize = AES_MIN_KEY_SIZE,
2739			.max_keysize = AES_MAX_KEY_SIZE,
2740			.ivsize = AES_BLOCK_SIZE,
2741			.setkey = skcipher_aes_setkey,
2742		},
2743		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2744				     DESC_HDR_SEL0_AESU |
2745				     DESC_HDR_MODE0_AESU_CBC,
2746	},
2747	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2748		.alg.skcipher = {
2749			.base.cra_name = "ctr(aes)",
2750			.base.cra_driver_name = "ctr-aes-talitos",
2751			.base.cra_blocksize = 1,
2752			.base.cra_flags = CRYPTO_ALG_ASYNC |
2753					  CRYPTO_ALG_ALLOCATES_MEMORY,
2754			.min_keysize = AES_MIN_KEY_SIZE,
2755			.max_keysize = AES_MAX_KEY_SIZE,
2756			.ivsize = AES_BLOCK_SIZE,
2757			.setkey = skcipher_aes_setkey,
2758		},
2759		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2760				     DESC_HDR_SEL0_AESU |
2761				     DESC_HDR_MODE0_AESU_CTR,
2762	},
2763	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2764		.alg.skcipher = {
2765			.base.cra_name = "ecb(des)",
2766			.base.cra_driver_name = "ecb-des-talitos",
2767			.base.cra_blocksize = DES_BLOCK_SIZE,
2768			.base.cra_flags = CRYPTO_ALG_ASYNC |
2769					  CRYPTO_ALG_ALLOCATES_MEMORY,
2770			.min_keysize = DES_KEY_SIZE,
2771			.max_keysize = DES_KEY_SIZE,
2772			.setkey = skcipher_des_setkey,
2773		},
2774		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2775				     DESC_HDR_SEL0_DEU,
2776	},
2777	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2778		.alg.skcipher = {
2779			.base.cra_name = "cbc(des)",
2780			.base.cra_driver_name = "cbc-des-talitos",
2781			.base.cra_blocksize = DES_BLOCK_SIZE,
2782			.base.cra_flags = CRYPTO_ALG_ASYNC |
2783					  CRYPTO_ALG_ALLOCATES_MEMORY,
2784			.min_keysize = DES_KEY_SIZE,
2785			.max_keysize = DES_KEY_SIZE,
2786			.ivsize = DES_BLOCK_SIZE,
2787			.setkey = skcipher_des_setkey,
2788		},
2789		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2790				     DESC_HDR_SEL0_DEU |
2791				     DESC_HDR_MODE0_DEU_CBC,
2792	},
2793	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2794		.alg.skcipher = {
2795			.base.cra_name = "ecb(des3_ede)",
2796			.base.cra_driver_name = "ecb-3des-talitos",
2797			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2798			.base.cra_flags = CRYPTO_ALG_ASYNC |
2799					  CRYPTO_ALG_ALLOCATES_MEMORY,
2800			.min_keysize = DES3_EDE_KEY_SIZE,
2801			.max_keysize = DES3_EDE_KEY_SIZE,
2802			.setkey = skcipher_des3_setkey,
2803		},
2804		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2805				     DESC_HDR_SEL0_DEU |
2806				     DESC_HDR_MODE0_DEU_3DES,
2807	},
2808	{	.type = CRYPTO_ALG_TYPE_SKCIPHER,
2809		.alg.skcipher = {
2810			.base.cra_name = "cbc(des3_ede)",
2811			.base.cra_driver_name = "cbc-3des-talitos",
2812			.base.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2813			.base.cra_flags = CRYPTO_ALG_ASYNC |
2814					  CRYPTO_ALG_ALLOCATES_MEMORY,
2815			.min_keysize = DES3_EDE_KEY_SIZE,
2816			.max_keysize = DES3_EDE_KEY_SIZE,
2817			.ivsize = DES3_EDE_BLOCK_SIZE,
2818			.setkey = skcipher_des3_setkey,
2819		},
2820		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2821			             DESC_HDR_SEL0_DEU |
2822		                     DESC_HDR_MODE0_DEU_CBC |
2823		                     DESC_HDR_MODE0_DEU_3DES,
2824	},
2825	/* AHASH algorithms. */
2826	{	.type = CRYPTO_ALG_TYPE_AHASH,
2827		.alg.hash = {
2828			.halg.digestsize = MD5_DIGEST_SIZE,
2829			.halg.statesize = sizeof(struct talitos_export_state),
2830			.halg.base = {
2831				.cra_name = "md5",
2832				.cra_driver_name = "md5-talitos",
2833				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2834				.cra_flags = CRYPTO_ALG_ASYNC |
2835					     CRYPTO_ALG_ALLOCATES_MEMORY,
2836			}
2837		},
2838		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2839				     DESC_HDR_SEL0_MDEUA |
2840				     DESC_HDR_MODE0_MDEU_MD5,
2841	},
2842	{	.type = CRYPTO_ALG_TYPE_AHASH,
2843		.alg.hash = {
2844			.halg.digestsize = SHA1_DIGEST_SIZE,
2845			.halg.statesize = sizeof(struct talitos_export_state),
2846			.halg.base = {
2847				.cra_name = "sha1",
2848				.cra_driver_name = "sha1-talitos",
2849				.cra_blocksize = SHA1_BLOCK_SIZE,
2850				.cra_flags = CRYPTO_ALG_ASYNC |
2851					     CRYPTO_ALG_ALLOCATES_MEMORY,
2852			}
2853		},
2854		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2855				     DESC_HDR_SEL0_MDEUA |
2856				     DESC_HDR_MODE0_MDEU_SHA1,
2857	},
2858	{	.type = CRYPTO_ALG_TYPE_AHASH,
2859		.alg.hash = {
2860			.halg.digestsize = SHA224_DIGEST_SIZE,
2861			.halg.statesize = sizeof(struct talitos_export_state),
2862			.halg.base = {
2863				.cra_name = "sha224",
2864				.cra_driver_name = "sha224-talitos",
2865				.cra_blocksize = SHA224_BLOCK_SIZE,
2866				.cra_flags = CRYPTO_ALG_ASYNC |
2867					     CRYPTO_ALG_ALLOCATES_MEMORY,
2868			}
2869		},
2870		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2871				     DESC_HDR_SEL0_MDEUA |
2872				     DESC_HDR_MODE0_MDEU_SHA224,
2873	},
2874	{	.type = CRYPTO_ALG_TYPE_AHASH,
2875		.alg.hash = {
2876			.halg.digestsize = SHA256_DIGEST_SIZE,
2877			.halg.statesize = sizeof(struct talitos_export_state),
2878			.halg.base = {
2879				.cra_name = "sha256",
2880				.cra_driver_name = "sha256-talitos",
2881				.cra_blocksize = SHA256_BLOCK_SIZE,
2882				.cra_flags = CRYPTO_ALG_ASYNC |
2883					     CRYPTO_ALG_ALLOCATES_MEMORY,
2884			}
2885		},
2886		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2887				     DESC_HDR_SEL0_MDEUA |
2888				     DESC_HDR_MODE0_MDEU_SHA256,
2889	},
2890	{	.type = CRYPTO_ALG_TYPE_AHASH,
2891		.alg.hash = {
2892			.halg.digestsize = SHA384_DIGEST_SIZE,
2893			.halg.statesize = sizeof(struct talitos_export_state),
2894			.halg.base = {
2895				.cra_name = "sha384",
2896				.cra_driver_name = "sha384-talitos",
2897				.cra_blocksize = SHA384_BLOCK_SIZE,
2898				.cra_flags = CRYPTO_ALG_ASYNC |
2899					     CRYPTO_ALG_ALLOCATES_MEMORY,
2900			}
2901		},
2902		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2903				     DESC_HDR_SEL0_MDEUB |
2904				     DESC_HDR_MODE0_MDEUB_SHA384,
2905	},
2906	{	.type = CRYPTO_ALG_TYPE_AHASH,
2907		.alg.hash = {
2908			.halg.digestsize = SHA512_DIGEST_SIZE,
2909			.halg.statesize = sizeof(struct talitos_export_state),
2910			.halg.base = {
2911				.cra_name = "sha512",
2912				.cra_driver_name = "sha512-talitos",
2913				.cra_blocksize = SHA512_BLOCK_SIZE,
2914				.cra_flags = CRYPTO_ALG_ASYNC |
2915					     CRYPTO_ALG_ALLOCATES_MEMORY,
2916			}
2917		},
2918		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2919				     DESC_HDR_SEL0_MDEUB |
2920				     DESC_HDR_MODE0_MDEUB_SHA512,
2921	},
2922	{	.type = CRYPTO_ALG_TYPE_AHASH,
2923		.alg.hash = {
2924			.halg.digestsize = MD5_DIGEST_SIZE,
2925			.halg.statesize = sizeof(struct talitos_export_state),
2926			.halg.base = {
2927				.cra_name = "hmac(md5)",
2928				.cra_driver_name = "hmac-md5-talitos",
2929				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2930				.cra_flags = CRYPTO_ALG_ASYNC |
2931					     CRYPTO_ALG_ALLOCATES_MEMORY,
2932			}
2933		},
2934		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2935				     DESC_HDR_SEL0_MDEUA |
2936				     DESC_HDR_MODE0_MDEU_MD5,
2937	},
2938	{	.type = CRYPTO_ALG_TYPE_AHASH,
2939		.alg.hash = {
2940			.halg.digestsize = SHA1_DIGEST_SIZE,
2941			.halg.statesize = sizeof(struct talitos_export_state),
2942			.halg.base = {
2943				.cra_name = "hmac(sha1)",
2944				.cra_driver_name = "hmac-sha1-talitos",
2945				.cra_blocksize = SHA1_BLOCK_SIZE,
2946				.cra_flags = CRYPTO_ALG_ASYNC |
2947					     CRYPTO_ALG_ALLOCATES_MEMORY,
2948			}
2949		},
2950		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2951				     DESC_HDR_SEL0_MDEUA |
2952				     DESC_HDR_MODE0_MDEU_SHA1,
2953	},
2954	{	.type = CRYPTO_ALG_TYPE_AHASH,
2955		.alg.hash = {
2956			.halg.digestsize = SHA224_DIGEST_SIZE,
2957			.halg.statesize = sizeof(struct talitos_export_state),
2958			.halg.base = {
2959				.cra_name = "hmac(sha224)",
2960				.cra_driver_name = "hmac-sha224-talitos",
2961				.cra_blocksize = SHA224_BLOCK_SIZE,
2962				.cra_flags = CRYPTO_ALG_ASYNC |
2963					     CRYPTO_ALG_ALLOCATES_MEMORY,
2964			}
2965		},
2966		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2967				     DESC_HDR_SEL0_MDEUA |
2968				     DESC_HDR_MODE0_MDEU_SHA224,
2969	},
2970	{	.type = CRYPTO_ALG_TYPE_AHASH,
2971		.alg.hash = {
2972			.halg.digestsize = SHA256_DIGEST_SIZE,
2973			.halg.statesize = sizeof(struct talitos_export_state),
2974			.halg.base = {
2975				.cra_name = "hmac(sha256)",
2976				.cra_driver_name = "hmac-sha256-talitos",
2977				.cra_blocksize = SHA256_BLOCK_SIZE,
2978				.cra_flags = CRYPTO_ALG_ASYNC |
2979					     CRYPTO_ALG_ALLOCATES_MEMORY,
2980			}
2981		},
2982		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2983				     DESC_HDR_SEL0_MDEUA |
2984				     DESC_HDR_MODE0_MDEU_SHA256,
2985	},
2986	{	.type = CRYPTO_ALG_TYPE_AHASH,
2987		.alg.hash = {
2988			.halg.digestsize = SHA384_DIGEST_SIZE,
2989			.halg.statesize = sizeof(struct talitos_export_state),
2990			.halg.base = {
2991				.cra_name = "hmac(sha384)",
2992				.cra_driver_name = "hmac-sha384-talitos",
2993				.cra_blocksize = SHA384_BLOCK_SIZE,
2994				.cra_flags = CRYPTO_ALG_ASYNC |
2995					     CRYPTO_ALG_ALLOCATES_MEMORY,
2996			}
2997		},
2998		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2999				     DESC_HDR_SEL0_MDEUB |
3000				     DESC_HDR_MODE0_MDEUB_SHA384,
3001	},
3002	{	.type = CRYPTO_ALG_TYPE_AHASH,
3003		.alg.hash = {
3004			.halg.digestsize = SHA512_DIGEST_SIZE,
3005			.halg.statesize = sizeof(struct talitos_export_state),
3006			.halg.base = {
3007				.cra_name = "hmac(sha512)",
3008				.cra_driver_name = "hmac-sha512-talitos",
3009				.cra_blocksize = SHA512_BLOCK_SIZE,
3010				.cra_flags = CRYPTO_ALG_ASYNC |
3011					     CRYPTO_ALG_ALLOCATES_MEMORY,
3012			}
3013		},
3014		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3015				     DESC_HDR_SEL0_MDEUB |
3016				     DESC_HDR_MODE0_MDEUB_SHA512,
3017	}
3018};
3019
3020struct talitos_crypto_alg {
3021	struct list_head entry;
3022	struct device *dev;
3023	struct talitos_alg_template algt;
3024};
3025
3026static int talitos_init_common(struct talitos_ctx *ctx,
3027			       struct talitos_crypto_alg *talitos_alg)
3028{
3029	struct talitos_private *priv;
3030
3031	/* update context with ptr to dev */
3032	ctx->dev = talitos_alg->dev;
3033
3034	/* assign SEC channel to tfm in round-robin fashion */
3035	priv = dev_get_drvdata(ctx->dev);
3036	ctx->ch = atomic_inc_return(&priv->last_chan) &
3037		  (priv->num_channels - 1);
3038
3039	/* copy descriptor header template value */
3040	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3041
3042	/* select done notification */
3043	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3044
3045	return 0;
3046}
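
/*
 * The round-robin above depends on num_channels being a power of two
 * (enforced at probe time), so ANDing with (num_channels - 1) maps the
 * ever-incrementing last_chan onto a valid channel index; with 4
 * channels: 5 & 3 = 1, 6 & 3 = 2, 7 & 3 = 3, 8 & 3 = 0.
 */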
3047
3048static int talitos_cra_init_aead(struct crypto_aead *tfm)
3049{
3050	struct aead_alg *alg = crypto_aead_alg(tfm);
3051	struct talitos_crypto_alg *talitos_alg;
3052	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3053
3054	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3055				   algt.alg.aead);
3056
3057	return talitos_init_common(ctx, talitos_alg);
3058}
3059
3060static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm)
3061{
3062	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
3063	struct talitos_crypto_alg *talitos_alg;
3064	struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm);
3065
3066	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3067				   algt.alg.skcipher);
3068
3069	return talitos_init_common(ctx, talitos_alg);
3070}
3071
3072static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3073{
3074	struct crypto_alg *alg = tfm->__crt_alg;
3075	struct talitos_crypto_alg *talitos_alg;
3076	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3077
3078	talitos_alg = container_of(__crypto_ahash_alg(alg),
3079				   struct talitos_crypto_alg,
3080				   algt.alg.hash);
3081
3082	ctx->keylen = 0;
3083	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3084				 sizeof(struct talitos_ahash_req_ctx));
3085
3086	return talitos_init_common(ctx, talitos_alg);
3087}
3088
3089static void talitos_cra_exit(struct crypto_tfm *tfm)
3090{
3091	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3092	struct device *dev = ctx->dev;
3093
3094	if (ctx->keylen)
3095		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3096}
3097
3098/*
3099 * Given the alg's descriptor header template, determine whether the
3100 * descriptor type and primary/secondary execution units required
3101 * match the h/w capabilities described in the device tree node.
3102 */
3103static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3104{
3105	struct talitos_private *priv = dev_get_drvdata(dev);
3106	int ret;
3107
3108	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3109	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3110
3111	if (SECONDARY_EU(desc_hdr_template))
3112		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3113		              & priv->exec_units);
3114
3115	return ret;
3116}
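
/*
 * Example: an authenc(hmac(sha1),cbc(aes)) template carries
 * DESC_HDR_SEL0_AESU and DESC_HDR_SEL1_MDEUA, so hw_supports() demands
 * both the AESU and MDEU-A bits in fsl,exec-units-mask plus the
 * matching descriptor type bit in fsl,descriptor-types-mask.
 */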
3117
3118static int talitos_remove(struct platform_device *ofdev)
3119{
3120	struct device *dev = &ofdev->dev;
3121	struct talitos_private *priv = dev_get_drvdata(dev);
3122	struct talitos_crypto_alg *t_alg, *n;
3123	int i;
3124
3125	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3126		switch (t_alg->algt.type) {
3127		case CRYPTO_ALG_TYPE_SKCIPHER:
3128			crypto_unregister_skcipher(&t_alg->algt.alg.skcipher);
3129			break;
3130		case CRYPTO_ALG_TYPE_AEAD:
3131			crypto_unregister_aead(&t_alg->algt.alg.aead);
3132			break;
3133		case CRYPTO_ALG_TYPE_AHASH:
3134			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3135			break;
3136		}
3137		list_del(&t_alg->entry);
3138	}
3139
3140	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3141		talitos_unregister_rng(dev);
3142
3143	for (i = 0; i < 2; i++)
3144		if (priv->irq[i]) {
3145			free_irq(priv->irq[i], dev);
3146			irq_dispose_mapping(priv->irq[i]);
3147		}
3148
3149	tasklet_kill(&priv->done_task[0]);
3150	if (priv->irq[1])
3151		tasklet_kill(&priv->done_task[1]);
3152
3153	return 0;
3154}
3155
3156static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3157						    struct talitos_alg_template
3158						           *template)
3159{
3160	struct talitos_private *priv = dev_get_drvdata(dev);
3161	struct talitos_crypto_alg *t_alg;
3162	struct crypto_alg *alg;
3163
3164	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3165			     GFP_KERNEL);
3166	if (!t_alg)
3167		return ERR_PTR(-ENOMEM);
3168
3169	t_alg->algt = *template;
3170
3171	switch (t_alg->algt.type) {
3172	case CRYPTO_ALG_TYPE_SKCIPHER:
3173		alg = &t_alg->algt.alg.skcipher.base;
3174		alg->cra_exit = talitos_cra_exit;
3175		t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher;
3176		t_alg->algt.alg.skcipher.setkey =
3177			t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey;
3178		t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt;
3179		t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt;
3180		break;
3181	case CRYPTO_ALG_TYPE_AEAD:
3182		alg = &t_alg->algt.alg.aead.base;
3183		alg->cra_exit = talitos_cra_exit;
3184		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3185		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3186					      aead_setkey;
3187		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3188		t_alg->algt.alg.aead.decrypt = aead_decrypt;
3189		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3190		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3191			devm_kfree(dev, t_alg);
3192			return ERR_PTR(-ENOTSUPP);
3193		}
3194		break;
3195	case CRYPTO_ALG_TYPE_AHASH:
3196		alg = &t_alg->algt.alg.hash.halg.base;
3197		alg->cra_init = talitos_cra_init_ahash;
3198		alg->cra_exit = talitos_cra_exit;
3199		t_alg->algt.alg.hash.init = ahash_init;
3200		t_alg->algt.alg.hash.update = ahash_update;
3201		t_alg->algt.alg.hash.final = ahash_final;
3202		t_alg->algt.alg.hash.finup = ahash_finup;
3203		t_alg->algt.alg.hash.digest = ahash_digest;
3204		if (!strncmp(alg->cra_name, "hmac", 4))
3205			t_alg->algt.alg.hash.setkey = ahash_setkey;
3206		t_alg->algt.alg.hash.import = ahash_import;
3207		t_alg->algt.alg.hash.export = ahash_export;
3208
3209		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3210		    !strncmp(alg->cra_name, "hmac", 4)) {
3211			devm_kfree(dev, t_alg);
3212			return ERR_PTR(-ENOTSUPP);
3213		}
3214		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3215		    (!strcmp(alg->cra_name, "sha224") ||
3216		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3217			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3218			t_alg->algt.desc_hdr_template =
3219					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3220					DESC_HDR_SEL0_MDEUA |
3221					DESC_HDR_MODE0_MDEU_SHA256;
3222		}
3223		break;
3224	default:
3225		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3226		devm_kfree(dev, t_alg);
3227		return ERR_PTR(-EINVAL);
3228	}
3229
3230	alg->cra_module = THIS_MODULE;
3231	if (t_alg->algt.priority)
3232		alg->cra_priority = t_alg->algt.priority;
3233	else
3234		alg->cra_priority = TALITOS_CRA_PRIORITY;
3235	if (has_ftr_sec1(priv))
3236		alg->cra_alignmask = 3;
3237	else
3238		alg->cra_alignmask = 0;
3239	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3240	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3241
3242	t_alg->dev = dev;
3243
3244	return t_alg;
3245}
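
/*
 * Note the SEC1 special case above: cra_alignmask = 3 makes the crypto
 * core hand this driver 4-byte-aligned buffers, whereas SEC2+ parts run
 * with alignmask 0 (no alignment constraint).
 */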
3246
3247static int talitos_probe_irq(struct platform_device *ofdev)
3248{
3249	struct device *dev = &ofdev->dev;
3250	struct device_node *np = ofdev->dev.of_node;
3251	struct talitos_private *priv = dev_get_drvdata(dev);
3252	int err;
3253	bool is_sec1 = has_ftr_sec1(priv);
3254
3255	priv->irq[0] = irq_of_parse_and_map(np, 0);
3256	if (!priv->irq[0]) {
3257		dev_err(dev, "failed to map irq\n");
3258		return -EINVAL;
3259	}
3260	if (is_sec1) {
3261		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3262				  dev_driver_string(dev), dev);
3263		goto primary_out;
3264	}
3265
3266	priv->irq[1] = irq_of_parse_and_map(np, 1);
3267
3268	/* no second irq line: one handler covers all four channels */
3269	if (!priv->irq[1]) {
3270		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3271				  dev_driver_string(dev), dev);
3272		goto primary_out;
3273	}
3274
3275	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3276			  dev_driver_string(dev), dev);
3277	if (err)
3278		goto primary_out;
3279
3280	/* get the secondary irq line */
3281	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3282			  dev_driver_string(dev), dev);
3283	if (err) {
3284		dev_err(dev, "failed to request secondary irq\n");
3285		irq_dispose_mapping(priv->irq[1]);
3286		priv->irq[1] = 0;
3287	}
3288
3289	return err;
3290
3291primary_out:
3292	if (err) {
3293		dev_err(dev, "failed to request primary irq\n");
3294		irq_dispose_mapping(priv->irq[0]);
3295		priv->irq[0] = 0;
3296	}
3297
3298	return err;
3299}
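
/*
 * IRQ topology handled above: SEC1 exposes a single interrupt serving
 * all four channels; SEC2+ exposes either one combined line or two
 * lines split between channels 0/2 and 1/3, the latter also enabling a
 * second done-tasklet in talitos_probe().
 */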
3300
3301static int talitos_probe(struct platform_device *ofdev)
3302{
3303	struct device *dev = &ofdev->dev;
3304	struct device_node *np = ofdev->dev.of_node;
3305	struct talitos_private *priv;
3306	int i, err;
3307	int stride;
3308	struct resource *res;
3309
3310	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3311	if (!priv)
3312		return -ENOMEM;
3313
3314	INIT_LIST_HEAD(&priv->alg_list);
3315
3316	dev_set_drvdata(dev, priv);
3317
3318	priv->ofdev = ofdev;
3319
3320	spin_lock_init(&priv->reg_lock);
3321
3322	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3323	if (!res)
3324		return -ENXIO;
3325	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3326	if (!priv->reg) {
3327		dev_err(dev, "failed to ioremap registers\n");
3328		err = -ENOMEM;
3329		goto err_out;
3330	}
3331
3332	/* get SEC version capabilities from device tree */
3333	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3334	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3335	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3336	of_property_read_u32(np, "fsl,descriptor-types-mask",
3337			     &priv->desc_types);
3338
3339	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3340	    !priv->exec_units || !priv->desc_types) {
3341		dev_err(dev, "invalid property data in device tree node\n");
3342		err = -EINVAL;
3343		goto err_out;
3344	}
3345
3346	if (of_device_is_compatible(np, "fsl,sec3.0"))
3347		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3348
3349	if (of_device_is_compatible(np, "fsl,sec2.1"))
3350		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3351				  TALITOS_FTR_SHA224_HWINIT |
3352				  TALITOS_FTR_HMAC_OK;
3353
3354	if (of_device_is_compatible(np, "fsl,sec1.0"))
3355		priv->features |= TALITOS_FTR_SEC1;
3356
3357	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3358		priv->reg_deu = priv->reg + TALITOS12_DEU;
3359		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3360		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3361		stride = TALITOS1_CH_STRIDE;
3362	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3363		priv->reg_deu = priv->reg + TALITOS10_DEU;
3364		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3365		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3366		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3367		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3368		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3369		stride = TALITOS1_CH_STRIDE;
3370	} else {
3371		priv->reg_deu = priv->reg + TALITOS2_DEU;
3372		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3373		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3374		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3375		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3376		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3377		priv->reg_keu = priv->reg + TALITOS2_KEU;
3378		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3379		stride = TALITOS2_CH_STRIDE;
3380	}
3381
3382	err = talitos_probe_irq(ofdev);
3383	if (err)
3384		goto err_out;
3385
3386	if (has_ftr_sec1(priv)) {
3387		if (priv->num_channels == 1)
3388			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3389				     (unsigned long)dev);
3390		else
3391			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3392				     (unsigned long)dev);
3393	} else {
3394		if (priv->irq[1]) {
3395			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3396				     (unsigned long)dev);
3397			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3398				     (unsigned long)dev);
3399		} else if (priv->num_channels == 1) {
3400			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3401				     (unsigned long)dev);
3402		} else {
3403			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3404				     (unsigned long)dev);
3405		}
3406	}
3407
3408	priv->chan = devm_kcalloc(dev,
3409				  priv->num_channels,
3410				  sizeof(struct talitos_channel),
3411				  GFP_KERNEL);
3412	if (!priv->chan) {
3413		dev_err(dev, "failed to allocate channel management space\n");
3414		err = -ENOMEM;
3415		goto err_out;
3416	}
3417
3418	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3419
3420	for (i = 0; i < priv->num_channels; i++) {
3421		priv->chan[i].reg = priv->reg + stride * (i + 1);
3422		if (!priv->irq[1] || !(i & 1))
3423			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3424
3425		spin_lock_init(&priv->chan[i].head_lock);
3426		spin_lock_init(&priv->chan[i].tail_lock);
3427
3428		priv->chan[i].fifo = devm_kcalloc(dev,
3429						priv->fifo_len,
3430						sizeof(struct talitos_request),
3431						GFP_KERNEL);
3432		if (!priv->chan[i].fifo) {
3433			dev_err(dev, "failed to allocate request fifo %d\n", i);
3434			err = -ENOMEM;
3435			goto err_out;
3436		}
3437
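		/*
		 * Bias submit_count so that atomic_inc_not_zero() in
		 * talitos_submit() starts failing once chfifo_len - 1
		 * requests are outstanding, i.e. when the h/w fifo is full.
		 */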
3438		atomic_set(&priv->chan[i].submit_count,
3439			   -(priv->chfifo_len - 1));
3440	}
3441
3442	dma_set_mask(dev, DMA_BIT_MASK(36));
3443
3444	/* reset and initialize the h/w */
3445	err = init_device(dev);
3446	if (err) {
3447		dev_err(dev, "failed to initialize device\n");
3448		goto err_out;
3449	}
3450
3451	/* register the RNG, if available */
3452	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3453		err = talitos_register_rng(dev);
3454		if (err) {
3455			dev_err(dev, "failed to register hwrng: %d\n", err);
3456			goto err_out;
3457		} else
3458			dev_info(dev, "hwrng\n");
3459	}
3460
3461	/* register crypto algorithms the device supports */
3462	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3463		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3464			struct talitos_crypto_alg *t_alg;
3465			struct crypto_alg *alg = NULL;
3466
3467			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3468			if (IS_ERR(t_alg)) {
3469				err = PTR_ERR(t_alg);
3470				if (err == -ENOTSUPP)
3471					continue;
3472				goto err_out;
3473			}
3474
3475			switch (t_alg->algt.type) {
3476			case CRYPTO_ALG_TYPE_SKCIPHER:
3477				err = crypto_register_skcipher(
3478						&t_alg->algt.alg.skcipher);
3479				alg = &t_alg->algt.alg.skcipher.base;
3480				break;
3481
3482			case CRYPTO_ALG_TYPE_AEAD:
3483				err = crypto_register_aead(
3484					&t_alg->algt.alg.aead);
3485				alg = &t_alg->algt.alg.aead.base;
3486				break;
3487
3488			case CRYPTO_ALG_TYPE_AHASH:
3489				err = crypto_register_ahash(
3490						&t_alg->algt.alg.hash);
3491				alg = &t_alg->algt.alg.hash.halg.base;
3492				break;
3493			}
3494			if (err) {
3495				dev_err(dev, "%s alg registration failed\n",
3496					alg->cra_driver_name);
3497				devm_kfree(dev, t_alg);
3498			} else
3499				list_add_tail(&t_alg->entry, &priv->alg_list);
3500		}
3501	}
3502	if (!list_empty(&priv->alg_list))
3503		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3504			 (char *)of_get_property(np, "compatible", NULL));
3505
3506	return 0;
3507
3508err_out:
3509	talitos_remove(ofdev);
3510
3511	return err;
3512}
3513
3514static const struct of_device_id talitos_match[] = {
3515#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3516	{
3517		.compatible = "fsl,sec1.0",
3518	},
3519#endif
3520#ifdef CONFIG_CRYPTO_DEV_TALITOS2
3521	{
3522		.compatible = "fsl,sec2.0",
3523	},
3524#endif
3525	{},
3526};
3527MODULE_DEVICE_TABLE(of, talitos_match);
3528
3529static struct platform_driver talitos_driver = {
3530	.driver = {
3531		.name = "talitos",
3532		.of_match_table = talitos_match,
3533	},
3534	.probe = talitos_probe,
3535	.remove = talitos_remove,
3536};
3537
3538module_platform_driver(talitos_driver);
3539
3540MODULE_LICENSE("GPL");
3541MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3542MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");
v3.5.6
   1/*
   2 * talitos - Freescale Integrated Security Engine (SEC) device driver
   3 *
   4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
   5 *
   6 * Scatterlist Crypto API glue code copied from files with the following:
   7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
   8 *
   9 * Crypto algorithm registration code copied from hifn driver:
  10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
  11 * All rights reserved.
  12 *
  13 * This program is free software; you can redistribute it and/or modify
  14 * it under the terms of the GNU General Public License as published by
  15 * the Free Software Foundation; either version 2 of the License, or
  16 * (at your option) any later version.
  17 *
  18 * This program is distributed in the hope that it will be useful,
  19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  21 * GNU General Public License for more details.
  22 *
  23 * You should have received a copy of the GNU General Public License
  24 * along with this program; if not, write to the Free Software
  25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  26 */
  27
  28#include <linux/kernel.h>
  29#include <linux/module.h>
  30#include <linux/mod_devicetable.h>
  31#include <linux/device.h>
  32#include <linux/interrupt.h>
  33#include <linux/crypto.h>
  34#include <linux/hw_random.h>
  35#include <linux/of_platform.h>
  36#include <linux/dma-mapping.h>
  37#include <linux/io.h>
  38#include <linux/spinlock.h>
  39#include <linux/rtnetlink.h>
  40#include <linux/slab.h>
  41
  42#include <crypto/algapi.h>
  43#include <crypto/aes.h>
  44#include <crypto/des.h>
  45#include <crypto/sha.h>
  46#include <crypto/md5.h>
  47#include <crypto/aead.h>
  48#include <crypto/authenc.h>
  49#include <crypto/skcipher.h>
  50#include <crypto/hash.h>
  51#include <crypto/internal/hash.h>
  52#include <crypto/scatterwalk.h>
  53
  54#include "talitos.h"
  55
  56#define TALITOS_TIMEOUT 100000
  57#define TALITOS_MAX_DATA_LEN 65535
  58
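/* descriptor header field extractors */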
  59#define DESC_TYPE(desc_hdr) ((be32_to_cpu(desc_hdr) >> 3) & 0x1f)
  60#define PRIMARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 28) & 0xf)
  61#define SECONDARY_EU(desc_hdr) ((be32_to_cpu(desc_hdr) >> 16) & 0xf)
  62
  63/* descriptor pointer entry */
  64struct talitos_ptr {
  65	__be16 len;	/* length */
  66	u8 j_extent;	/* jump to sg link table and/or extent */
  67	u8 eptr;	/* extended address */
  68	__be32 ptr;	/* address */
  69};
  70
  71static const struct talitos_ptr zero_entry = {
  72	.len = 0,
  73	.j_extent = 0,
  74	.eptr = 0,
  75	.ptr = 0
  76};
  77
  78/* descriptor */
  79struct talitos_desc {
  80	__be32 hdr;			/* header high bits */
  81	__be32 hdr_lo;			/* header low bits */
  82	struct talitos_ptr ptr[7];	/* ptr/len pair array */
  83};
  84
  85/**
  86 * talitos_request - descriptor submission request
  87 * @desc: descriptor pointer (kernel virtual)
  88 * @dma_desc: descriptor's physical bus address
  89 * @callback: whom to call when descriptor processing is done
  90 * @context: caller context (optional)
  91 */
  92struct talitos_request {
  93	struct talitos_desc *desc;
  94	dma_addr_t dma_desc;
  95	void (*callback) (struct device *dev, struct talitos_desc *desc,
  96	                  void *context, int error);
  97	void *context;
  98};
  99
 100/* per-channel fifo management */
 101struct talitos_channel {
 102	void __iomem *reg;
 103
 104	/* request fifo */
 105	struct talitos_request *fifo;
 106
 107	/* number of requests pending in channel h/w fifo */
 108	atomic_t submit_count ____cacheline_aligned;
 109
 110	/* request submission (head) lock */
 111	spinlock_t head_lock ____cacheline_aligned;
 112	/* index to next free descriptor request */
 113	int head;
 114
 115	/* request release (tail) lock */
 116	spinlock_t tail_lock ____cacheline_aligned;
 117	/* index to next in-progress/done descriptor request */
 118	int tail;
 119};
 120
 121struct talitos_private {
 122	struct device *dev;
 123	struct platform_device *ofdev;
 124	void __iomem *reg;
 125	int irq[2];
 126
 127	/* SEC global registers lock  */
 128	spinlock_t reg_lock ____cacheline_aligned;
 129
 130	/* SEC version geometry (from device tree node) */
 131	unsigned int num_channels;
 132	unsigned int chfifo_len;
 133	unsigned int exec_units;
 134	unsigned int desc_types;
 135
 136	/* SEC Compatibility info */
 137	unsigned long features;
 138
 139	/*
 140	 * length of the request fifo
 141	 * fifo_len is chfifo_len rounded up to next power of 2
 142	 * so we can use bitwise ops to wrap
 143	 */
 144	unsigned int fifo_len;
 145
 146	struct talitos_channel *chan;
 147
 148	/* next channel to be assigned next incoming descriptor */
 149	atomic_t last_chan ____cacheline_aligned;
 150
 151	/* request callback tasklet */
 152	struct tasklet_struct done_task[2];
 153
 154	/* list of registered algorithms */
 155	struct list_head alg_list;
 156
 157	/* hwrng device */
 158	struct hwrng rng;
 159};
 160
 161/* .features flag */
 162#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
 163#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
 164#define TALITOS_FTR_SHA224_HWINIT 0x00000004
 165#define TALITOS_FTR_HMAC_OK 0x00000008
 166
 167static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
 168{
 169	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
 170	talitos_ptr->eptr = upper_32_bits(dma_addr);
 171}
 172
 173/*
 174 * map virtual single (contiguous) pointer to h/w descriptor pointer
 175 */
 176static void map_single_talitos_ptr(struct device *dev,
 177				   struct talitos_ptr *talitos_ptr,
 178				   unsigned short len, void *data,
 179				   unsigned char extent,
 180				   enum dma_data_direction dir)
 181{
 182	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
 183
 184	talitos_ptr->len = cpu_to_be16(len);
 185	to_talitos_ptr(talitos_ptr, dma_addr);
 186	talitos_ptr->j_extent = extent;
 187}
 188
 189/*
 190 * unmap bus single (contiguous) h/w descriptor pointer
 191 */
 192static void unmap_single_talitos_ptr(struct device *dev,
 193				     struct talitos_ptr *talitos_ptr,
 194				     enum dma_data_direction dir)
 195{
 196	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
 197			 be16_to_cpu(talitos_ptr->len), dir);
 198}
 199
 200static int reset_channel(struct device *dev, int ch)
 201{
 202	struct talitos_private *priv = dev_get_drvdata(dev);
 203	unsigned int timeout = TALITOS_TIMEOUT;
 204
 205	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);
 206
 207	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
 208	       && --timeout)
 209		cpu_relax();
 210
 211	if (timeout == 0) {
 212		dev_err(dev, "failed to reset channel %d\n", ch);
 213		return -EIO;
 214	}
 215
 216	/* set 36-bit addressing, done writeback enable and done IRQ enable */
 217	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
 218		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
 219
 220	/* and ICCR writeback, if available */
 221	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 222		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
 223		          TALITOS_CCCR_LO_IWSE);
 224
 225	return 0;
 226}
 227
 228static int reset_device(struct device *dev)
 229{
 230	struct talitos_private *priv = dev_get_drvdata(dev);
 231	unsigned int timeout = TALITOS_TIMEOUT;
 232	u32 mcr = TALITOS_MCR_SWR;
 233
 234	setbits32(priv->reg + TALITOS_MCR, mcr);
 235
 236	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
 237	       && --timeout)
 238		cpu_relax();
 239
 240	if (priv->irq[1]) {
 241		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
 242		setbits32(priv->reg + TALITOS_MCR, mcr);
 243	}
 244
 245	if (timeout == 0) {
 246		dev_err(dev, "failed to reset device\n");
 247		return -EIO;
 248	}
 249
 250	return 0;
 251}
 252
 253/*
 254 * Reset and initialize the device
 255 */
 256static int init_device(struct device *dev)
 257{
 258	struct talitos_private *priv = dev_get_drvdata(dev);
 259	int ch, err;
 260
 261	/*
 262	 * Master reset
 263	 * errata documentation: warning: certain SEC interrupts
 264	 * are not fully cleared by writing the MCR:SWR bit,
 265	 * set bit twice to completely reset
 266	 */
 267	err = reset_device(dev);
 268	if (err)
 269		return err;
 270
 271	err = reset_device(dev);
 272	if (err)
 273		return err;
 274
 275	/* reset channels */
 276	for (ch = 0; ch < priv->num_channels; ch++) {
 277		err = reset_channel(dev, ch);
 278		if (err)
 279			return err;
 280	}
 281
 282	/* enable channel done and error interrupts */
 283	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
 284	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
 285
 286	/* disable integrity check error interrupts (use writeback instead) */
 287	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
 288		setbits32(priv->reg + TALITOS_MDEUICR_LO,
 289		          TALITOS_MDEUICR_LO_ICE);
 290
 291	return 0;
 292}
 293
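/*
 * Callers treat -EINPROGRESS as success; any other return means the
 * descriptor was not queued and the caller must clean up, e.g. as in
 * ipsec_esp() below:
 *
 *	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
 *	if (ret != -EINPROGRESS) {
 *		ipsec_esp_unmap(dev, edesc, areq);
 *		kfree(edesc);
 *	}
 */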
 294/**
 295 * talitos_submit - submits a descriptor to the device for processing
 296 * @dev:	the SEC device to be used
 297 * @ch:		the SEC device channel to be used
 298 * @desc:	the descriptor to be processed by the device
 299 * @callback:	whom to call when processing is complete
 300 * @context:	a handle for use by caller (optional)
 301 *
 302 * desc must contain valid dma-mapped (bus physical) address pointers.
 303 * callback must check err and feedback in descriptor header
 304 * for device processing status.
 305 */
 306static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
 307			  void (*callback)(struct device *dev,
 308					   struct talitos_desc *desc,
 309					   void *context, int error),
 310			  void *context)
 311{
 312	struct talitos_private *priv = dev_get_drvdata(dev);
 313	struct talitos_request *request;
 314	unsigned long flags;
 315	int head;
 316
 317	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
 318
 319	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
 320		/* h/w fifo is full */
 321		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 322		return -EAGAIN;
 323	}
 324
 325	head = priv->chan[ch].head;
 326	request = &priv->chan[ch].fifo[head];
 327
 328	/* map descriptor and save caller data */
 329	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
 330					   DMA_BIDIRECTIONAL);
 331	request->callback = callback;
 332	request->context = context;
 333
 334	/* increment fifo head */
 335	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
 336
 337	smp_wmb();
 338	request->desc = desc;
 339
 340	/* GO! */
 341	wmb();
 342	out_be32(priv->chan[ch].reg + TALITOS_FF,
 343		 upper_32_bits(request->dma_desc));
 344	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
 345		 lower_32_bits(request->dma_desc));
 346
 347	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
 348
 349	return -EINPROGRESS;
 350}
 351
 352/*
353 * process completed requests; if an error is pending, flush unfinished ones with it
 354 */
 355static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
 356{
 357	struct talitos_private *priv = dev_get_drvdata(dev);
 358	struct talitos_request *request, saved_req;
 359	unsigned long flags;
 360	int tail, status;
 361
 362	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 363
 364	tail = priv->chan[ch].tail;
 365	while (priv->chan[ch].fifo[tail].desc) {
 366		request = &priv->chan[ch].fifo[tail];
 367
 368		/* descriptors with their done bits set don't get the error */
 369		rmb();
 370		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
 371			status = 0;
 372		else
 373			if (!error)
 374				break;
 375			else
 376				status = error;
 377
 378		dma_unmap_single(dev, request->dma_desc,
 379				 sizeof(struct talitos_desc),
 380				 DMA_BIDIRECTIONAL);
 381
 382		/* copy entries so we can call callback outside lock */
 383		saved_req.desc = request->desc;
 384		saved_req.callback = request->callback;
 385		saved_req.context = request->context;
 386
 387		/* release request entry in fifo */
 388		smp_wmb();
 389		request->desc = NULL;
 390
 391		/* increment fifo tail */
 392		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
 393
 394		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 395
 396		atomic_dec(&priv->chan[ch].submit_count);
 397
 398		saved_req.callback(dev, saved_req.desc, saved_req.context,
 399				   status);
 400		/* channel may resume processing in single desc error case */
 401		if (error && !reset_ch && status == error)
 402			return;
 403		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
 404		tail = priv->chan[ch].tail;
 405	}
 406
 407	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
 408}
 409
 410/*
 411 * process completed requests for channels that have done status
 412 */
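/*
 * Channel status occupies two ISR bits per channel: bit 2N is channel
 * N's done bit and bit 2N + 1 its error bit, hence the 1, 1 << 2,
 * 1 << 4 and 1 << 6 tests below.
 */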
 413#define DEF_TALITOS_DONE(name, ch_done_mask)				\
 414static void talitos_done_##name(unsigned long data)			\
 415{									\
 416	struct device *dev = (struct device *)data;			\
 417	struct talitos_private *priv = dev_get_drvdata(dev);		\
 418	unsigned long flags;						\
 419									\
 420	if (ch_done_mask & 1)						\
 421		flush_channel(dev, 0, 0, 0);				\
 422	if (priv->num_channels == 1)					\
 423		goto out;						\
 424	if (ch_done_mask & (1 << 2))					\
 425		flush_channel(dev, 1, 0, 0);				\
 426	if (ch_done_mask & (1 << 4))					\
 427		flush_channel(dev, 2, 0, 0);				\
 428	if (ch_done_mask & (1 << 6))					\
 429		flush_channel(dev, 3, 0, 0);				\
 430									\
 431out:									\
 432	/* At this point, all completed channels have been processed */	\
 433	/* Unmask done interrupts for channels completed later on. */	\
 434	spin_lock_irqsave(&priv->reg_lock, flags);			\
 435	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
 436	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
 437	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
 438}
 439DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
 440DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
 441DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
 442
 443/*
 444 * locate current (offending) descriptor
 445 */
 446static u32 current_desc_hdr(struct device *dev, int ch)
 447{
 448	struct talitos_private *priv = dev_get_drvdata(dev);
 449	int tail = priv->chan[ch].tail;
 450	dma_addr_t cur_desc;
 451
 452	cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
 453
 454	while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
 455		tail = (tail + 1) & (priv->fifo_len - 1);
 456		if (tail == priv->chan[ch].tail) {
 457			dev_err(dev, "couldn't locate current descriptor\n");
 458			return 0;
 459		}
 460	}
 461
 462	return priv->chan[ch].fifo[tail].desc->hdr;
 463}
 464
 465/*
 466 * user diagnostics; report root cause of error based on execution unit status
 467 */
 468static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
 469{
 470	struct talitos_private *priv = dev_get_drvdata(dev);
 471	int i;
 472
 473	if (!desc_hdr)
 474		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
 475
 476	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
 477	case DESC_HDR_SEL0_AFEU:
 478		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
 479			in_be32(priv->reg + TALITOS_AFEUISR),
 480			in_be32(priv->reg + TALITOS_AFEUISR_LO));
 481		break;
 482	case DESC_HDR_SEL0_DEU:
 483		dev_err(dev, "DEUISR 0x%08x_%08x\n",
 484			in_be32(priv->reg + TALITOS_DEUISR),
 485			in_be32(priv->reg + TALITOS_DEUISR_LO));
 486		break;
 487	case DESC_HDR_SEL0_MDEUA:
 488	case DESC_HDR_SEL0_MDEUB:
 489		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 490			in_be32(priv->reg + TALITOS_MDEUISR),
 491			in_be32(priv->reg + TALITOS_MDEUISR_LO));
 492		break;
 493	case DESC_HDR_SEL0_RNG:
 494		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
 495			in_be32(priv->reg + TALITOS_RNGUISR),
 496			in_be32(priv->reg + TALITOS_RNGUISR_LO));
 497		break;
 498	case DESC_HDR_SEL0_PKEU:
 499		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
 500			in_be32(priv->reg + TALITOS_PKEUISR),
 501			in_be32(priv->reg + TALITOS_PKEUISR_LO));
 502		break;
 503	case DESC_HDR_SEL0_AESU:
 504		dev_err(dev, "AESUISR 0x%08x_%08x\n",
 505			in_be32(priv->reg + TALITOS_AESUISR),
 506			in_be32(priv->reg + TALITOS_AESUISR_LO));
 507		break;
 508	case DESC_HDR_SEL0_CRCU:
 509		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 510			in_be32(priv->reg + TALITOS_CRCUISR),
 511			in_be32(priv->reg + TALITOS_CRCUISR_LO));
 512		break;
 513	case DESC_HDR_SEL0_KEU:
 514		dev_err(dev, "KEUISR 0x%08x_%08x\n",
 515			in_be32(priv->reg + TALITOS_KEUISR),
 516			in_be32(priv->reg + TALITOS_KEUISR_LO));
 517		break;
 518	}
 519
 520	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
 521	case DESC_HDR_SEL1_MDEUA:
 522	case DESC_HDR_SEL1_MDEUB:
 523		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
 524			in_be32(priv->reg + TALITOS_MDEUISR),
 525			in_be32(priv->reg + TALITOS_MDEUISR_LO));
 526		break;
 527	case DESC_HDR_SEL1_CRCU:
 528		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
 529			in_be32(priv->reg + TALITOS_CRCUISR),
 530			in_be32(priv->reg + TALITOS_CRCUISR_LO));
 531		break;
 532	}
 533
 534	for (i = 0; i < 8; i++)
 535		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
 536			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
 537			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
 538}
 539
 540/*
 541 * recover from error interrupts
 542 */
 543static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
 544{
 545	struct talitos_private *priv = dev_get_drvdata(dev);
 546	unsigned int timeout = TALITOS_TIMEOUT;
 547	int ch, error, reset_dev = 0, reset_ch = 0;
 548	u32 v, v_lo;
 549
 550	for (ch = 0; ch < priv->num_channels; ch++) {
 551		/* skip channels without errors */
 552		if (!(isr & (1 << (ch * 2 + 1))))
 553			continue;
 554
 555		error = -EINVAL;
 556
 557		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
 558		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
 559
 560		if (v_lo & TALITOS_CCPSR_LO_DOF) {
 561			dev_err(dev, "double fetch fifo overflow error\n");
 562			error = -EAGAIN;
 563			reset_ch = 1;
 564		}
 565		if (v_lo & TALITOS_CCPSR_LO_SOF) {
 566			/* h/w dropped descriptor */
 567			dev_err(dev, "single fetch fifo overflow error\n");
 568			error = -EAGAIN;
 569		}
 570		if (v_lo & TALITOS_CCPSR_LO_MDTE)
 571			dev_err(dev, "master data transfer error\n");
 572		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
 573			dev_err(dev, "s/g data length zero error\n");
 574		if (v_lo & TALITOS_CCPSR_LO_FPZ)
 575			dev_err(dev, "fetch pointer zero error\n");
 576		if (v_lo & TALITOS_CCPSR_LO_IDH)
 577			dev_err(dev, "illegal descriptor header error\n");
 578		if (v_lo & TALITOS_CCPSR_LO_IEU)
 579			dev_err(dev, "invalid execution unit error\n");
 580		if (v_lo & TALITOS_CCPSR_LO_EU)
 581			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
 582		if (v_lo & TALITOS_CCPSR_LO_GB)
 583			dev_err(dev, "gather boundary error\n");
 584		if (v_lo & TALITOS_CCPSR_LO_GRL)
 585			dev_err(dev, "gather return/length error\n");
 586		if (v_lo & TALITOS_CCPSR_LO_SB)
 587			dev_err(dev, "scatter boundary error\n");
 588		if (v_lo & TALITOS_CCPSR_LO_SRL)
 589			dev_err(dev, "scatter return/length error\n");
 590
 591		flush_channel(dev, ch, error, reset_ch);
 592
 593		if (reset_ch) {
 594			reset_channel(dev, ch);
 595		} else {
 596			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
 597				  TALITOS_CCCR_CONT);
 598			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
 599			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
 600			       TALITOS_CCCR_CONT) && --timeout)
 601				cpu_relax();
 602			if (timeout == 0) {
 603				dev_err(dev, "failed to restart channel %d\n",
 604					ch);
 605				reset_dev = 1;
 606			}
 607		}
 608	}
 609	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
 610		dev_err(dev, "done overflow, internal time out, or rngu error: "
 611		        "ISR 0x%08x_%08x\n", isr, isr_lo);
 612
 613		/* purge request queues */
 614		for (ch = 0; ch < priv->num_channels; ch++)
 615			flush_channel(dev, ch, -EIO, 1);
 616
 617		/* reset and reinitialize the device */
 618		init_device(dev);
 619	}
 620}
 621
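/*
 * Interrupt handler template: acknowledge the channels this handler
 * owns, hand any error status to talitos_error(), and on done status
 * mask further done interrupts and defer completion processing to the
 * matching done tasklet.
 */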
 622#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
 623static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
 624{									       \
 625	struct device *dev = data;					       \
 626	struct talitos_private *priv = dev_get_drvdata(dev);		       \
 627	u32 isr, isr_lo;						       \
 628	unsigned long flags;						       \
 629									       \
 630	spin_lock_irqsave(&priv->reg_lock, flags);			       \
 631	isr = in_be32(priv->reg + TALITOS_ISR);				       \
 632	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
 633	/* Acknowledge interrupt */					       \
 634	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
 635	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
 636									       \
 637	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
 638		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 639		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
 640	}								       \
 641	else {								       \
 642		if (likely(isr & ch_done_mask)) {			       \
 643			/* mask further done interrupts. */		       \
 644			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
 645			/* done_task will unmask done interrupts at exit */    \
 646			tasklet_schedule(&priv->done_task[tlet]);	       \
 647		}							       \
 648		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
 649	}								       \
 650									       \
 651	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
 652								IRQ_NONE;      \
 653}
 654DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
 655DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
 656DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
 657
 658/*
 659 * hwrng
 660 */
 661static int talitos_rng_data_present(struct hwrng *rng, int wait)
 662{
 663	struct device *dev = (struct device *)rng->priv;
 664	struct talitos_private *priv = dev_get_drvdata(dev);
 665	u32 ofl;
 666	int i;
 667
 668	for (i = 0; i < 20; i++) {
 669		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
 670		      TALITOS_RNGUSR_LO_OFL;
 671		if (ofl || !wait)
 672			break;
 673		udelay(10);
 674	}
 675
 676	return !!ofl;
 677}
 678
 679static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
 680{
 681	struct device *dev = (struct device *)rng->priv;
 682	struct talitos_private *priv = dev_get_drvdata(dev);
 683
684	/* rng fifo requires 64-bit accesses; do both reads, return the low word */
 685	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
 686	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);
 687
 688	return sizeof(u32);
 689}
 690
 691static int talitos_rng_init(struct hwrng *rng)
 692{
 693	struct device *dev = (struct device *)rng->priv;
 694	struct talitos_private *priv = dev_get_drvdata(dev);
 695	unsigned int timeout = TALITOS_TIMEOUT;
 696
 697	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
 698	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
 699	       && --timeout)
 700		cpu_relax();
 701	if (timeout == 0) {
 702		dev_err(dev, "failed to reset rng hw\n");
 703		return -ENODEV;
 704	}
 705
 706	/* start generating */
 707	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);
 708
 709	return 0;
 710}
 711
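/* hook the RNGU up to the kernel hw_random framework */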
 712static int talitos_register_rng(struct device *dev)
 713{
 714	struct talitos_private *priv = dev_get_drvdata(dev);
 715
 716	priv->rng.name		= dev_driver_string(dev),
 717	priv->rng.init		= talitos_rng_init,
 718	priv->rng.data_present	= talitos_rng_data_present,
 719	priv->rng.data_read	= talitos_rng_data_read,
 720	priv->rng.priv		= (unsigned long)dev;
 721
 722	return hwrng_register(&priv->rng);
 723}
 724
 725static void talitos_unregister_rng(struct device *dev)
 726{
 727	struct talitos_private *priv = dev_get_drvdata(dev);
 728
 729	hwrng_unregister(&priv->rng);
 730}
 731
 732/*
 733 * crypto alg
 734 */
 735#define TALITOS_CRA_PRIORITY		3000
 736#define TALITOS_MAX_KEY_SIZE		64
 737#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
 738
 739#define MD5_BLOCK_SIZE    64
 740
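/* per-transform (tfm) context: channel assignment and cached key/IV */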
 741struct talitos_ctx {
 742	struct device *dev;
 743	int ch;
 744	__be32 desc_hdr_template;
 745	u8 key[TALITOS_MAX_KEY_SIZE];
 746	u8 iv[TALITOS_MAX_IV_LENGTH];
 747	unsigned int keylen;
 748	unsigned int enckeylen;
 749	unsigned int authkeylen;
 750	unsigned int authsize;
 751};
 752
 753#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
 754#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
 755
 756struct talitos_ahash_req_ctx {
 757	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
 758	unsigned int hw_context_size;
 759	u8 buf[HASH_MAX_BLOCK_SIZE];
 760	u8 bufnext[HASH_MAX_BLOCK_SIZE];
 761	unsigned int swinit;
 762	unsigned int first;
 763	unsigned int last;
 764	unsigned int to_hash_later;
 765	u64 nbuf;
 766	struct scatterlist bufsl[2];
 767	struct scatterlist *psrc;
 768};
 769
 770static int aead_setauthsize(struct crypto_aead *authenc,
 771			    unsigned int authsize)
 772{
 773	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 774
 775	ctx->authsize = authsize;
 776
 777	return 0;
 778}
 779
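/*
 * authenc() keys arrive as an rtattr-encoded blob: a
 * crypto_authenc_key_param header carrying enckeylen, followed by the
 * authentication key and then the encryption key.  Both halves are
 * cached contiguously in ctx->key.
 */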
 780static int aead_setkey(struct crypto_aead *authenc,
 781		       const u8 *key, unsigned int keylen)
 782{
 783	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 784	struct rtattr *rta = (void *)key;
 785	struct crypto_authenc_key_param *param;
 786	unsigned int authkeylen;
 787	unsigned int enckeylen;
 788
 789	if (!RTA_OK(rta, keylen))
 790		goto badkey;
 791
 792	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
 793		goto badkey;
 794
 795	if (RTA_PAYLOAD(rta) < sizeof(*param))
 796		goto badkey;
 797
 798	param = RTA_DATA(rta);
 799	enckeylen = be32_to_cpu(param->enckeylen);
 800
 801	key += RTA_ALIGN(rta->rta_len);
 802	keylen -= RTA_ALIGN(rta->rta_len);
 803
 804	if (keylen < enckeylen)
 805		goto badkey;
 806
 807	authkeylen = keylen - enckeylen;
 808
 809	if (keylen > TALITOS_MAX_KEY_SIZE)
 810		goto badkey;
 811
 812	memcpy(&ctx->key, key, keylen);
 813
 814	ctx->keylen = keylen;
 815	ctx->enckeylen = enckeylen;
 816	ctx->authkeylen = authkeylen;
 817
 818	return 0;
 819
 820badkey:
 821	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
 822	return -EINVAL;
 823}
 824
 825/*
 826 * talitos_edesc - s/w-extended descriptor
 827 * @src_nents: number of segments in input scatterlist
828 * @dst_nents: number of segments in output scatterlist
 * @src_is_chained: whether the input scatterlist is chained
 * @dst_is_chained: whether the output scatterlist is chained
 829 * @dma_len: length of dma mapped link_tbl space
 830 * @dma_link_tbl: bus physical address of link_tbl
 831 * @desc: h/w descriptor
 832 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 833 *
 834 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 835 * is greater than 1, an integrity check value is concatenated to the end
 836 * of link_tbl data
 837 */
 838struct talitos_edesc {
 839	int src_nents;
 840	int dst_nents;
 841	int src_is_chained;
 842	int dst_is_chained;
 843	int dma_len;
 844	dma_addr_t dma_link_tbl;
 845	struct talitos_desc desc;
 846	struct talitos_ptr link_tbl[0];
 847};
 848
 849static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
 850			  unsigned int nents, enum dma_data_direction dir,
 851			  int chained)
 852{
 853	if (unlikely(chained))
 854		while (sg) {
 855			dma_map_sg(dev, sg, 1, dir);
 856			sg = scatterwalk_sg_next(sg);
 857		}
 858	else
 859		dma_map_sg(dev, sg, nents, dir);
 860	return nents;
 861}
 862
 863static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
 864				   enum dma_data_direction dir)
 865{
 866	while (sg) {
 867		dma_unmap_sg(dev, sg, 1, dir);
 868		sg = scatterwalk_sg_next(sg);
 869	}
 870}
 871
 872static void talitos_sg_unmap(struct device *dev,
 873			     struct talitos_edesc *edesc,
 874			     struct scatterlist *src,
 875			     struct scatterlist *dst)
 876{
 877	unsigned int src_nents = edesc->src_nents ? : 1;
 878	unsigned int dst_nents = edesc->dst_nents ? : 1;
 879
 880	if (src != dst) {
 881		if (edesc->src_is_chained)
 882			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
 883		else
 884			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
 885
 886		if (dst) {
 887			if (edesc->dst_is_chained)
 888				talitos_unmap_sg_chain(dev, dst,
 889						       DMA_FROM_DEVICE);
 890			else
 891				dma_unmap_sg(dev, dst, dst_nents,
 892					     DMA_FROM_DEVICE);
 893		}
 894	} else
 895		if (edesc->src_is_chained)
 896			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
 897		else
 898			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
 899}
 900
 901static void ipsec_esp_unmap(struct device *dev,
 902			    struct talitos_edesc *edesc,
 903			    struct aead_request *areq)
 904{
 905	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
 906	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
 907	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
 908	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
 909
 910	dma_unmap_sg(dev, areq->assoc, 1, DMA_TO_DEVICE);
 911
 912	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
 913
 914	if (edesc->dma_len)
 915		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
 916				 DMA_BIDIRECTIONAL);
 917}
 918
 919/*
 920 * ipsec_esp descriptor callbacks
 921 */
 922static void ipsec_esp_encrypt_done(struct device *dev,
 923				   struct talitos_desc *desc, void *context,
 924				   int err)
 925{
 926	struct aead_request *areq = context;
 927	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
 928	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 929	struct talitos_edesc *edesc;
 930	struct scatterlist *sg;
 931	void *icvdata;
 932
 933	edesc = container_of(desc, struct talitos_edesc, desc);
 934
 935	ipsec_esp_unmap(dev, edesc, areq);
 936
 937	/* copy the generated ICV to dst */
 938	if (edesc->dma_len) {
 939		icvdata = &edesc->link_tbl[edesc->src_nents +
 940					   edesc->dst_nents + 2];
 941		sg = sg_last(areq->dst, edesc->dst_nents);
 942		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
 943		       icvdata, ctx->authsize);
 944	}
 945
 946	kfree(edesc);
 947
 948	aead_request_complete(areq, err);
 949}
 950
 951static void ipsec_esp_decrypt_swauth_done(struct device *dev,
 952					  struct talitos_desc *desc,
 953					  void *context, int err)
 954{
 955	struct aead_request *req = context;
 956	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
 957	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
 958	struct talitos_edesc *edesc;
 959	struct scatterlist *sg;
 960	void *icvdata;
 961
 962	edesc = container_of(desc, struct talitos_edesc, desc);
 963
 964	ipsec_esp_unmap(dev, edesc, req);
 965
 966	if (!err) {
 967		/* auth check */
 968		if (edesc->dma_len)
 969			icvdata = &edesc->link_tbl[edesc->src_nents +
 970						   edesc->dst_nents + 2];
 971		else
 972			icvdata = &edesc->link_tbl[0];
 973
 974		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
 975		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
 976			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
 977	}
 978
 979	kfree(edesc);
 980
 981	aead_request_complete(req, err);
 982}
 983
 984static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
 985					  struct talitos_desc *desc,
 986					  void *context, int err)
 987{
 988	struct aead_request *req = context;
 989	struct talitos_edesc *edesc;
 990
 991	edesc = container_of(desc, struct talitos_edesc, desc);
 992
 993	ipsec_esp_unmap(dev, edesc, req);
 994
 995	/* check ICV auth status */
 996	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
 997		     DESC_HDR_LO_ICCR1_PASS))
 998		err = -EBADMSG;
 999
1000	kfree(edesc);
1001
1002	aead_request_complete(req, err);
1003}
1004
1005/*
1006 * convert scatterlist to SEC h/w link table format
1007 * stop at cryptlen bytes
1008 */
1009static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
1010			   int cryptlen, struct talitos_ptr *link_tbl_ptr)
1011{
1012	int n_sg = sg_count;
1013
1014	while (n_sg--) {
1015		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
1016		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
1017		link_tbl_ptr->j_extent = 0;
1018		link_tbl_ptr++;
1019		cryptlen -= sg_dma_len(sg);
1020		sg = scatterwalk_sg_next(sg);
1021	}
1022
1023	/* adjust (decrease) last one (or two) entry's len to cryptlen */
1024	link_tbl_ptr--;
1025	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
1026		/* Empty this entry, and move to previous one */
1027		cryptlen += be16_to_cpu(link_tbl_ptr->len);
1028		link_tbl_ptr->len = 0;
1029		sg_count--;
1030		link_tbl_ptr--;
1031	}
1032	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
1033					+ cryptlen);
1034
1035	/* tag end of link table */
1036	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1037
1038	return sg_count;
1039}
1040
1041/*
1042 * fill in and submit ipsec_esp descriptor
1043 */
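/*
 * Descriptor pointer usage: [0] HMAC key, [1] HMAC data (assoc + IV),
 * [2] cipher IV, [3] cipher key, [4] cipher in, [5] cipher out
 * (+ ICV extent), [6] IV out.
 */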
1044static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1045		     u8 *giv, u64 seq,
1046		     void (*callback) (struct device *dev,
1047				       struct talitos_desc *desc,
1048				       void *context, int error))
1049{
1050	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1051	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1052	struct device *dev = ctx->dev;
1053	struct talitos_desc *desc = &edesc->desc;
1054	unsigned int cryptlen = areq->cryptlen;
1055	unsigned int authsize = ctx->authsize;
1056	unsigned int ivsize = crypto_aead_ivsize(aead);
1057	int sg_count, ret;
1058	int sg_link_tbl_len;
1059
1060	/* hmac key */
1061	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
1062			       0, DMA_TO_DEVICE);
1063	/* hmac data */
1064	map_single_talitos_ptr(dev, &desc->ptr[1], areq->assoclen + ivsize,
1065			       sg_virt(areq->assoc), 0, DMA_TO_DEVICE);
1066	/* cipher iv */
1067	map_single_talitos_ptr(dev, &desc->ptr[2], ivsize, giv ?: areq->iv, 0,
1068			       DMA_TO_DEVICE);
1069
1070	/* cipher key */
1071	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1072			       (char *)&ctx->key + ctx->authkeylen, 0,
1073			       DMA_TO_DEVICE);
1074
1075	/*
1076	 * cipher in
1077	 * map and adjust cipher len to aead request cryptlen.
1078	 * extent is bytes of HMAC postpended to ciphertext,
1079	 * typically 12 for ipsec
1080	 */
1081	desc->ptr[4].len = cpu_to_be16(cryptlen);
1082	desc->ptr[4].j_extent = authsize;
1083
1084	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1085				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1086							   : DMA_TO_DEVICE,
1087				  edesc->src_is_chained);
1088
1089	if (sg_count == 1) {
1090		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
1091	} else {
1092		sg_link_tbl_len = cryptlen;
1093
1094		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1095			sg_link_tbl_len = cryptlen + authsize;
1096
1097		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
1098					  &edesc->link_tbl[0]);
1099		if (sg_count > 1) {
1100			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1101			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
1102			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1103						   edesc->dma_len,
1104						   DMA_BIDIRECTIONAL);
1105		} else {
1106			/* Only one segment now, so no link tbl needed */
1107			to_talitos_ptr(&desc->ptr[4],
1108				       sg_dma_address(areq->src));
1109		}
1110	}
1111
1112	/* cipher out */
1113	desc->ptr[5].len = cpu_to_be16(cryptlen);
1114	desc->ptr[5].j_extent = authsize;
1115
1116	if (areq->src != areq->dst)
1117		sg_count = talitos_map_sg(dev, areq->dst,
1118					  edesc->dst_nents ? : 1,
1119					  DMA_FROM_DEVICE,
1120					  edesc->dst_is_chained);
1121
1122	if (sg_count == 1) {
1123		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
1124	} else {
1125		struct talitos_ptr *link_tbl_ptr =
1126			&edesc->link_tbl[edesc->src_nents + 1];
1127
1128		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
1129			       (edesc->src_nents + 1) *
1130			       sizeof(struct talitos_ptr));
1131		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1132					  link_tbl_ptr);
1133
1134		/* Add an entry to the link table for ICV data */
1135		link_tbl_ptr += sg_count - 1;
1136		link_tbl_ptr->j_extent = 0;
1137		sg_count++;
1138		link_tbl_ptr++;
1139		link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1140		link_tbl_ptr->len = cpu_to_be16(authsize);
1141
1142		/* icv data follows link tables */
1143		to_talitos_ptr(link_tbl_ptr, edesc->dma_link_tbl +
1144			       (edesc->src_nents + edesc->dst_nents + 2) *
1145			       sizeof(struct talitos_ptr));
1146		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1147		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1148					   edesc->dma_len, DMA_BIDIRECTIONAL);
1149	}
1150
1151	/* iv out */
1152	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
1153			       DMA_FROM_DEVICE);
1154
1155	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1156	if (ret != -EINPROGRESS) {
1157		ipsec_esp_unmap(dev, edesc, areq);
1158		kfree(edesc);
1159	}
1160	return ret;
1161}
1162
1163/*
1164 * derive number of elements in scatterlist
1165 */
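/* a zero-length slot after a non-terminal entry marks an sg chain */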
1166static int sg_count(struct scatterlist *sg_list, int nbytes, int *chained)
1167{
1168	struct scatterlist *sg = sg_list;
1169	int sg_nents = 0;
1170
1171	*chained = 0;
1172	while (nbytes > 0) {
1173		sg_nents++;
1174		nbytes -= sg->length;
1175		if (!sg_is_last(sg) && (sg + 1)->length == 0)
1176			*chained = 1;
1177		sg = scatterwalk_sg_next(sg);
1178	}
1179
1180	return sg_nents;
1181}
1182
1183/**
1184 * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
1185 * @sgl:		 The SG list
1186 * @nents:		 Number of SG entries
1187 * @buf:		 Where to copy to
1188 * @buflen:		 The number of bytes to copy
1189 * @skip:		 The number of bytes to skip before copying.
1190 *                       Note: skip + buflen should equal SG total size.
1191 *
1192 * Returns the number of copied bytes.
1193 *
1194 **/
1195static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
1196				    void *buf, size_t buflen, unsigned int skip)
1197{
1198	unsigned int offset = 0;
1199	unsigned int boffset = 0;
1200	struct sg_mapping_iter miter;
1201	unsigned long flags;
1202	unsigned int sg_flags = SG_MITER_ATOMIC;
1203	size_t total_buffer = buflen + skip;
1204
1205	sg_flags |= SG_MITER_FROM_SG;
1206
1207	sg_miter_start(&miter, sgl, nents, sg_flags);
1208
1209	local_irq_save(flags);
1210
1211	while (sg_miter_next(&miter) && offset < total_buffer) {
1212		unsigned int len;
1213		unsigned int ignore;
1214
1215		if ((offset + miter.length) > skip) {
1216			if (offset < skip) {
1217				/* Copy part of this segment */
1218				ignore = skip - offset;
1219				len = miter.length - ignore;
1220				if (boffset + len > buflen)
1221					len = buflen - boffset;
1222				memcpy(buf + boffset, miter.addr + ignore, len);
1223			} else {
1224				/* Copy all of this segment (up to buflen) */
1225				len = miter.length;
1226				if (boffset + len > buflen)
1227					len = buflen - boffset;
1228				memcpy(buf + boffset, miter.addr, len);
1229			}
1230			boffset += len;
1231		}
1232		offset += miter.length;
1233	}
1234
1235	sg_miter_stop(&miter);
1236
1237	local_irq_restore(flags);
1238	return boffset;
1239}
1240
1241/*
1242 * allocate and map the extended descriptor
1243 */
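/*
 * The edesc header, the (src + dst + 2)-entry link table and the ICV
 * scratch area share one allocation; the dma_len bytes starting at
 * link_tbl (link table + ICV scratch) are what get DMA mapped.
 */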
1244static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1245						 struct scatterlist *src,
1246						 struct scatterlist *dst,
1247						 int hash_result,
1248						 unsigned int cryptlen,
1249						 unsigned int authsize,
1250						 int icv_stashing,
1251						 u32 cryptoflags)
1252{
1253	struct talitos_edesc *edesc;
1254	int src_nents, dst_nents, alloc_len, dma_len;
1255	int src_chained, dst_chained = 0;
1256	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1257		      GFP_ATOMIC;
1258
1259	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1260		dev_err(dev, "length exceeds h/w max limit\n");
1261		return ERR_PTR(-EINVAL);
1262	}
1263
1264	src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1265	src_nents = (src_nents == 1) ? 0 : src_nents;
1266
1267	if (hash_result) {
1268		dst_nents = 0;
1269	} else {
1270		if (dst == src) {
1271			dst_nents = src_nents;
1272		} else {
1273			dst_nents = sg_count(dst, cryptlen + authsize,
1274					     &dst_chained);
1275			dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1276		}
1277	}
1278
1279	/*
1280	 * allocate space for base edesc plus the link tables,
1281	 * allowing for two separate entries for ICV and generated ICV (+ 2),
1282	 * and the ICV data itself
1283	 */
1284	alloc_len = sizeof(struct talitos_edesc);
1285	if (src_nents || dst_nents) {
1286		dma_len = (src_nents + dst_nents + 2) *
1287				 sizeof(struct talitos_ptr) + authsize;
1288		alloc_len += dma_len;
1289	} else {
1290		dma_len = 0;
1291		alloc_len += icv_stashing ? authsize : 0;
1292	}
1293
1294	edesc = kmalloc(alloc_len, GFP_DMA | flags);
1295	if (!edesc) {
1296		dev_err(dev, "could not allocate edescriptor\n");
1297		return ERR_PTR(-ENOMEM);
1298	}
1299
1300	edesc->src_nents = src_nents;
1301	edesc->dst_nents = dst_nents;
1302	edesc->src_is_chained = src_chained;
1303	edesc->dst_is_chained = dst_chained;
1304	edesc->dma_len = dma_len;
1305	if (dma_len)
1306		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1307						     edesc->dma_len,
1308						     DMA_BIDIRECTIONAL);
1309
1310	return edesc;
1311}
1312
1313static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq,
1314					      int icv_stashing)
1315{
1316	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1317	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1318
1319	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1320				   areq->cryptlen, ctx->authsize, icv_stashing,
1321				   areq->base.flags);
1322}
1323
1324static int aead_encrypt(struct aead_request *req)
1325{
1326	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1327	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1328	struct talitos_edesc *edesc;
1329
1330	/* allocate extended descriptor */
1331	edesc = aead_edesc_alloc(req, 0);
1332	if (IS_ERR(edesc))
1333		return PTR_ERR(edesc);
1334
1335	/* set encrypt */
1336	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1337
1338	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_encrypt_done);
1339}
1340
1341static int aead_decrypt(struct aead_request *req)
1342{
1343	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1344	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1345	unsigned int authsize = ctx->authsize;
1346	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1347	struct talitos_edesc *edesc;
1348	struct scatterlist *sg;
1349	void *icvdata;
1350
1351	req->cryptlen -= authsize;
1352
1353	/* allocate extended descriptor */
1354	edesc = aead_edesc_alloc(req, 1);
1355	if (IS_ERR(edesc))
1356		return PTR_ERR(edesc);
1357
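	/*
	 * The h/w can verify the ICV itself only if it has
	 * TALITOS_FTR_HW_AUTH_CHECK and either no link table is needed
	 * or the link table length includes the ICV extent; otherwise
	 * stash the incoming ICV and compare it in software.
	 */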
1358	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1359	    ((!edesc->src_nents && !edesc->dst_nents) ||
1360	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1361
1362		/* decrypt and check the ICV */
1363		edesc->desc.hdr = ctx->desc_hdr_template |
1364				  DESC_HDR_DIR_INBOUND |
1365				  DESC_HDR_MODE1_MDEU_CICV;
1366
1367		/* reset integrity check result bits */
1368		edesc->desc.hdr_lo = 0;
1369
1370		return ipsec_esp(edesc, req, NULL, 0,
1371				 ipsec_esp_decrypt_hwauth_done);
1372
1373	}
1374
1375	/* Have to check the ICV with software */
1376	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1377
1378	/* stash incoming ICV for later cmp with ICV generated by the h/w */
1379	if (edesc->dma_len)
1380		icvdata = &edesc->link_tbl[edesc->src_nents +
1381					   edesc->dst_nents + 2];
1382	else
1383		icvdata = &edesc->link_tbl[0];
1384
1385	sg = sg_last(req->src, edesc->src_nents ? : 1);
1386
1387	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1388	       ctx->authsize);
1389
1390	return ipsec_esp(edesc, req, NULL, 0, ipsec_esp_decrypt_swauth_done);
1391}
1392
1393static int aead_givencrypt(struct aead_givcrypt_request *req)
1394{
1395	struct aead_request *areq = &req->areq;
1396	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1397	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1398	struct talitos_edesc *edesc;
1399
1400	/* allocate extended descriptor */
1401	edesc = aead_edesc_alloc(areq, 0);
1402	if (IS_ERR(edesc))
1403		return PTR_ERR(edesc);
1404
1405	/* set encrypt */
1406	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1407
1408	memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
1409	/* avoid consecutive packets going out with same IV */
1410	*(__be64 *)req->giv ^= cpu_to_be64(req->seq);
1411
1412	return ipsec_esp(edesc, areq, req->giv, req->seq,
1413			 ipsec_esp_encrypt_done);
1414}
1415
1416static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1417			     const u8 *key, unsigned int keylen)
1418{
1419	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1420
1421	memcpy(&ctx->key, key, keylen);
1422	ctx->keylen = keylen;
1423
1424	return 0;
1425}
1426
1427static void common_nonsnoop_unmap(struct device *dev,
1428				  struct talitos_edesc *edesc,
1429				  struct ablkcipher_request *areq)
1430{
1431	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1432	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1433	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1434
1435	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);
1436
1437	if (edesc->dma_len)
1438		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1439				 DMA_BIDIRECTIONAL);
1440}
1441
1442static void ablkcipher_done(struct device *dev,
1443			    struct talitos_desc *desc, void *context,
1444			    int err)
1445{
1446	struct ablkcipher_request *areq = context;
1447	struct talitos_edesc *edesc;
1448
1449	edesc = container_of(desc, struct talitos_edesc, desc);
1450
1451	common_nonsnoop_unmap(dev, edesc, areq);
1452
1453	kfree(edesc);
1454
1455	areq->base.complete(&areq->base, err);
1456}
1457
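/*
 * Fill in and submit an ablkcipher descriptor.  Pointer usage:
 * [0] unused, [1] IV in, [2] cipher key, [3] cipher in, [4] cipher out,
 * [5] IV out, [6] unused.
 */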
1458static int common_nonsnoop(struct talitos_edesc *edesc,
1459			   struct ablkcipher_request *areq,
1460			   void (*callback) (struct device *dev,
1461					     struct talitos_desc *desc,
1462					     void *context, int error))
1463{
1464	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1465	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1466	struct device *dev = ctx->dev;
1467	struct talitos_desc *desc = &edesc->desc;
1468	unsigned int cryptlen = areq->nbytes;
1469	unsigned int ivsize;
1470	int sg_count, ret;
1471
1472	/* first DWORD empty */
1473	desc->ptr[0].len = 0;
1474	to_talitos_ptr(&desc->ptr[0], 0);
1475	desc->ptr[0].j_extent = 0;
1476
1477	/* cipher iv */
1478	ivsize = crypto_ablkcipher_ivsize(cipher);
1479	map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0,
1480			       DMA_TO_DEVICE);
1481
1482	/* cipher key */
1483	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1484			       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1485
1486	/*
1487	 * cipher in
1488	 */
1489	desc->ptr[3].len = cpu_to_be16(cryptlen);
1490	desc->ptr[3].j_extent = 0;
1491
1492	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1493				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1494							   : DMA_TO_DEVICE,
1495				  edesc->src_is_chained);
1496
1497	if (sg_count == 1) {
1498		to_talitos_ptr(&desc->ptr[3], sg_dma_address(areq->src));
1499	} else {
1500		sg_count = sg_to_link_tbl(areq->src, sg_count, cryptlen,
1501					  &edesc->link_tbl[0]);
1502		if (sg_count > 1) {
1503			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1504			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1505			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1506						   edesc->dma_len,
1507						   DMA_BIDIRECTIONAL);
1508		} else {
1509			/* Only one segment now, so no link tbl needed */
1510			to_talitos_ptr(&desc->ptr[3],
1511				       sg_dma_address(areq->src));
1512		}
1513	}
1514
1515	/* cipher out */
1516	desc->ptr[4].len = cpu_to_be16(cryptlen);
1517	desc->ptr[4].j_extent = 0;
1518
1519	if (areq->src != areq->dst)
1520		sg_count = talitos_map_sg(dev, areq->dst,
1521					  edesc->dst_nents ? : 1,
1522					  DMA_FROM_DEVICE,
1523					  edesc->dst_is_chained);
1524
1525	if (sg_count == 1) {
1526		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->dst));
1527	} else {
1528		struct talitos_ptr *link_tbl_ptr =
1529			&edesc->link_tbl[edesc->src_nents + 1];
1530
1531		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
1532					      (edesc->src_nents + 1) *
1533					      sizeof(struct talitos_ptr));
1534		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
1535		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
1536					  link_tbl_ptr);
1537		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1538					   edesc->dma_len, DMA_BIDIRECTIONAL);
1539	}
1540
1541	/* iv out */
1542	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 0,
1543			       DMA_FROM_DEVICE);
1544
1545	/* last DWORD empty */
1546	desc->ptr[6].len = 0;
1547	to_talitos_ptr(&desc->ptr[6], 0);
1548	desc->ptr[6].j_extent = 0;
1549
1550	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1551	if (ret != -EINPROGRESS) {
1552		common_nonsnoop_unmap(dev, edesc, areq);
1553		kfree(edesc);
1554	}
1555	return ret;
1556}
1557
1558static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1559						    areq)
1560{
1561	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1562	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1563
1564	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 0,
1565				   areq->nbytes, 0, 0, areq->base.flags);
1566}
1567
1568static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1569{
1570	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1571	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1572	struct talitos_edesc *edesc;
1573
1574	/* allocate extended descriptor */
1575	edesc = ablkcipher_edesc_alloc(areq);
1576	if (IS_ERR(edesc))
1577		return PTR_ERR(edesc);
1578
1579	/* set encrypt */
1580	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1581
1582	return common_nonsnoop(edesc, areq, ablkcipher_done);
1583}
1584
1585static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1586{
1587	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1588	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1589	struct talitos_edesc *edesc;
1590
1591	/* allocate extended descriptor */
1592	edesc = ablkcipher_edesc_alloc(areq);
1593	if (IS_ERR(edesc))
1594		return PTR_ERR(edesc);
1595
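	/* set decrypt */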
1596	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1597
1598	return common_nonsnoop(edesc, areq, ablkcipher_done);
1599}
1600
1601static void common_nonsnoop_hash_unmap(struct device *dev,
1602				       struct talitos_edesc *edesc,
1603				       struct ahash_request *areq)
1604{
1605	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1606
1607	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1608
1609	/* When using hashctx-in, must unmap it. */
1610	if (edesc->desc.ptr[1].len)
1611		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1612					 DMA_TO_DEVICE);
1613
1614	if (edesc->desc.ptr[2].len)
1615		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1616					 DMA_TO_DEVICE);
1617
1618	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL);
1619
1620	if (edesc->dma_len)
1621		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1622				 DMA_BIDIRECTIONAL);
1623
1624}
1625
1626static void ahash_done(struct device *dev,
1627		       struct talitos_desc *desc, void *context,
1628		       int err)
1629{
1630	struct ahash_request *areq = context;
1631	struct talitos_edesc *edesc =
1632		 container_of(desc, struct talitos_edesc, desc);
1633	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1634
1635	if (!req_ctx->last && req_ctx->to_hash_later) {
1636		/* Position any partial block for next update/final/finup */
1637		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
1638		req_ctx->nbuf = req_ctx->to_hash_later;
1639	}
1640	common_nonsnoop_hash_unmap(dev, edesc, areq);
1641
1642	kfree(edesc);
1643
1644	areq->base.complete(&areq->base, err);
1645}
1646
1647static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1648				struct ahash_request *areq, unsigned int length,
1649				void (*callback) (struct device *dev,
1650						  struct talitos_desc *desc,
1651						  void *context, int error))
1652{
1653	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1654	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1655	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1656	struct device *dev = ctx->dev;
1657	struct talitos_desc *desc = &edesc->desc;
1658	int sg_count, ret;
1659
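	/*
	 * Pointer dword layout for a hash request: [0] empty, [1] hash
	 * context in, [2] HMAC key, [3] data in, [4] empty, [5] digest
	 * or hash context out, [6] empty.
	 */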
1660	/* first DWORD empty */
1661	desc->ptr[0] = zero_entry;
1662
1663	/* hash context in */
1664	if (!req_ctx->first || req_ctx->swinit) {
1665		map_single_talitos_ptr(dev, &desc->ptr[1],
1666				       req_ctx->hw_context_size,
1667				       (char *)req_ctx->hw_context, 0,
1668				       DMA_TO_DEVICE);
1669		req_ctx->swinit = 0;
1670	} else {
1671		desc->ptr[1] = zero_entry;
1672		/* Indicate next op is not the first. */
1673		req_ctx->first = 0;
1674	}
1675
1676	/* HMAC key */
1677	if (ctx->keylen)
1678		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
1679				       (char *)&ctx->key, 0, DMA_TO_DEVICE);
1680	else
1681		desc->ptr[2] = zero_entry;
1682
1683	/*
1684	 * data in
1685	 */
1686	desc->ptr[3].len = cpu_to_be16(length);
1687	desc->ptr[3].j_extent = 0;
1688
1689	sg_count = talitos_map_sg(dev, req_ctx->psrc,
1690				  edesc->src_nents ? : 1,
1691				  DMA_TO_DEVICE,
1692				  edesc->src_is_chained);
1693
1694	if (sg_count == 1) {
1695		to_talitos_ptr(&desc->ptr[3], sg_dma_address(req_ctx->psrc));
1696	} else {
1697		sg_count = sg_to_link_tbl(req_ctx->psrc, sg_count, length,
1698					  &edesc->link_tbl[0]);
1699		if (sg_count > 1) {
1700			desc->ptr[3].j_extent |= DESC_PTR_LNKTBL_JUMP;
1701			to_talitos_ptr(&desc->ptr[3], edesc->dma_link_tbl);
1702			dma_sync_single_for_device(ctx->dev,
1703						   edesc->dma_link_tbl,
1704						   edesc->dma_len,
1705						   DMA_BIDIRECTIONAL);
1706		} else {
1707			/* Only one segment now, so no link tbl needed */
1708			to_talitos_ptr(&desc->ptr[3],
1709				       sg_dma_address(req_ctx->psrc));
1710		}
1711	}
1712
1713	/* fifth DWORD empty */
1714	desc->ptr[4] = zero_entry;
1715
1716	/* hash/HMAC out -or- hash context out */
1717	if (req_ctx->last)
1718		map_single_talitos_ptr(dev, &desc->ptr[5],
1719				       crypto_ahash_digestsize(tfm),
1720				       areq->result, 0, DMA_FROM_DEVICE);
1721	else
1722		map_single_talitos_ptr(dev, &desc->ptr[5],
1723				       req_ctx->hw_context_size,
1724				       req_ctx->hw_context, 0, DMA_FROM_DEVICE);
1725
1726	/* last DWORD empty */
1727	desc->ptr[6] = zero_entry;
1728
1729	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1730	if (ret != -EINPROGRESS) {
1731		common_nonsnoop_hash_unmap(dev, edesc, areq);
1732		kfree(edesc);
1733	}
1734	return ret;
1735}
1736
1737static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1738					       unsigned int nbytes)
1739{
1740	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1741	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1742	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1743
1744	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, 1,
1745				   nbytes, 0, 0, areq->base.flags);
1746}
1747
1748static int ahash_init(struct ahash_request *areq)
1749{
1750	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1751	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1752
1753	/* Initialize the context */
1754	req_ctx->nbuf = 0;
1755	req_ctx->first = 1; /* first indicates h/w must init its context */
1756	req_ctx->swinit = 0; /* assume h/w init of context */
1757	req_ctx->hw_context_size =
1758		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1759			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1760			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1761
1762	return 0;
1763}
1764
1765/*
1766 * on h/w without explicit sha224 support, we initialize h/w context
1767 * manually with sha224 constants, and tell it to run sha256.
1768 */
1769static int ahash_init_sha224_swinit(struct ahash_request *areq)
1770{
1771	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1772
1773	ahash_init(areq);
1774	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1775
1776	req_ctx->hw_context[0] = SHA224_H0;
1777	req_ctx->hw_context[1] = SHA224_H1;
1778	req_ctx->hw_context[2] = SHA224_H2;
1779	req_ctx->hw_context[3] = SHA224_H3;
1780	req_ctx->hw_context[4] = SHA224_H4;
1781	req_ctx->hw_context[5] = SHA224_H5;
1782	req_ctx->hw_context[6] = SHA224_H6;
1783	req_ctx->hw_context[7] = SHA224_H7;
1784
1785	/* init 64-bit count */
1786	req_ctx->hw_context[8] = 0;
1787	req_ctx->hw_context[9] = 0;
1788
1789	return 0;
1790}
1791
1792static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1793{
1794	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1795	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1796	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1797	struct talitos_edesc *edesc;
1798	unsigned int blocksize =
1799			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1800	unsigned int nbytes_to_hash;
1801	unsigned int to_hash_later;
1802	unsigned int nsg;
1803	int chained;
1804
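	/*
	 * Strategy: buffer incoming data until more than one block is
	 * available, hash only whole blocks, and carry the remainder
	 * (or one full block) over, so a final request always has data
	 * left to pad.
	 */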
1805	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1806		/* Buffer up to one whole block */
1807		sg_copy_to_buffer(areq->src,
1808				  sg_count(areq->src, nbytes, &chained),
1809				  req_ctx->buf + req_ctx->nbuf, nbytes);
1810		req_ctx->nbuf += nbytes;
1811		return 0;
1812	}
1813
1814	/* More than one block is available, or this is the final request */
1815	nbytes_to_hash = nbytes + req_ctx->nbuf;
1816	to_hash_later = nbytes_to_hash & (blocksize - 1);
1817
1818	if (req_ctx->last)
1819		to_hash_later = 0;
1820	else if (to_hash_later)
1821		/* There is a partial block. Hash the full block(s) now */
1822		nbytes_to_hash -= to_hash_later;
1823	else {
1824		/* Keep one block buffered */
1825		nbytes_to_hash -= blocksize;
1826		to_hash_later = blocksize;
1827	}
1828
1829	/* Chain in any previously buffered data */
1830	if (req_ctx->nbuf) {
1831		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1832		sg_init_table(req_ctx->bufsl, nsg);
1833		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1834		if (nsg > 1)
1835			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
1836		req_ctx->psrc = req_ctx->bufsl;
1837	} else
1838		req_ctx->psrc = areq->src;
1839
1840	if (to_hash_later) {
1841		int nents = sg_count(areq->src, nbytes, &chained);
1842		sg_copy_end_to_buffer(areq->src, nents,
1843				      req_ctx->bufnext,
1844				      to_hash_later,
1845				      nbytes - to_hash_later);
1846	}
1847	req_ctx->to_hash_later = to_hash_later;
1848
1849	/* Allocate extended descriptor */
1850	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1851	if (IS_ERR(edesc))
1852		return PTR_ERR(edesc);
1853
1854	edesc->desc.hdr = ctx->desc_hdr_template;
1855
1856	/* On last one, request SEC to pad; otherwise continue */
1857	if (req_ctx->last)
1858		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1859	else
1860		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1861
1862	/* request SEC to INIT hash. */
1863	if (req_ctx->first && !req_ctx->swinit)
1864		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1865
1866	/* When the tfm context has a keylen, it's an HMAC.
1867	 * A first or last (i.e. not middle) descriptor must request HMAC.
1868	 */
1869	if (ctx->keylen && (req_ctx->first || req_ctx->last))
1870		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1871
1872	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1873				    ahash_done);
1874}
1875
1876static int ahash_update(struct ahash_request *areq)
1877{
1878	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1879
1880	req_ctx->last = 0;
1881
1882	return ahash_process_req(areq, areq->nbytes);
1883}
1884
1885static int ahash_final(struct ahash_request *areq)
1886{
1887	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1888
1889	req_ctx->last = 1;
1890
1891	return ahash_process_req(areq, 0);
1892}
1893
1894static int ahash_finup(struct ahash_request *areq)
1895{
1896	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1897
1898	req_ctx->last = 1;
1899
1900	return ahash_process_req(areq, areq->nbytes);
1901}
1902
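/* digest: re-init the transform state, then hash the whole request in one pass */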
1903static int ahash_digest(struct ahash_request *areq)
1904{
1905	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1906	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1907
1908	ahash->init(areq);
1909	req_ctx->last = 1;
1910
1911	return ahash_process_req(areq, areq->nbytes);
1912}
1913
1914struct keyhash_result {
1915	struct completion completion;
1916	int err;
1917};
1918
1919static void keyhash_complete(struct crypto_async_request *req, int err)
1920{
1921	struct keyhash_result *res = req->data;
1922
1923	if (err == -EINPROGRESS)
1924		return;
1925
1926	res->err = err;
1927	complete(&res->completion);
1928}
1929
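/*
 * Hash a key that is longer than the block size down to a digest,
 * using this driver's own ahash and waiting synchronously for the
 * asynchronous completion.
 */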
1930static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1931		   u8 *hash)
1932{
1933	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1934
1935	struct scatterlist sg[1];
1936	struct ahash_request *req;
1937	struct keyhash_result hresult;
1938	int ret;
1939
1940	init_completion(&hresult.completion);
1941
1942	req = ahash_request_alloc(tfm, GFP_KERNEL);
1943	if (!req)
1944		return -ENOMEM;
1945
1946	/* Keep tfm keylen == 0 during hash of the long key */
1947	ctx->keylen = 0;
1948	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1949				   keyhash_complete, &hresult);
1950
1951	sg_init_one(&sg[0], key, keylen);
1952
1953	ahash_request_set_crypt(req, sg, hash, keylen);
1954	ret = crypto_ahash_digest(req);
1955	switch (ret) {
1956	case 0:
1957		break;
1958	case -EINPROGRESS:
1959	case -EBUSY:
1960		ret = wait_for_completion_interruptible(
1961			&hresult.completion);
1962		if (!ret)
1963			ret = hresult.err;
1964		break;
1965	default:
1966		break;
1967	}
1968	ahash_request_free(req);
1969
1970	return ret;
1971}
1972
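/*
 * Per the HMAC construction (RFC 2104), keys longer than the block
 * size are first hashed down to the digest size.
 */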
1973static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1974			unsigned int keylen)
1975{
1976	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1977	unsigned int blocksize =
1978			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1979	unsigned int digestsize = crypto_ahash_digestsize(tfm);
1980	unsigned int keysize = keylen;
1981	u8 hash[SHA512_DIGEST_SIZE];
1982	int ret;
1983
1984	if (keylen <= blocksize) {
1985		memcpy(ctx->key, key, keysize);
1986	} else {
1987		/* Must get the hash of the long key */
1988		ret = keyhash(tfm, key, keylen, hash);
1989
1990		if (ret) {
1991			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1992			return -EINVAL;
1993		}
1994
1995		keysize = digestsize;
1996		memcpy(ctx->key, hash, digestsize);
1997	}
1998
1999	ctx->keylen = keysize;
2000
2001	return 0;
2002}
2003
2005struct talitos_alg_template {
2006	u32 type;
2007	union {
2008		struct crypto_alg crypto;
2009		struct ahash_alg hash;
2010	} alg;
2011	__be32 desc_hdr_template;
2012};
2013
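/*
 * Each entry below pairs a generic crypto API template with the SEC
 * descriptor header that implements it.  As a rough usage sketch
 * (illustrative only, using the old ablkcipher interface of this era),
 * a kernel user would reach, e.g., "cbc-aes-talitos" via:
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (!IS_ERR(tfm))
 *		crypto_ablkcipher_setkey(tfm, key, keylen);
 *
 * with the crypto core picking this driver when its cra_priority wins.
 */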
2014static struct talitos_alg_template driver_algs[] = {
2015	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2016	{	.type = CRYPTO_ALG_TYPE_AEAD,
2017		.alg.crypto = {
2018			.cra_name = "authenc(hmac(sha1),cbc(aes))",
2019			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
2020			.cra_blocksize = AES_BLOCK_SIZE,
2021			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2022			.cra_type = &crypto_aead_type,
2023			.cra_aead = {
2024				.setkey = aead_setkey,
2025				.setauthsize = aead_setauthsize,
2026				.encrypt = aead_encrypt,
2027				.decrypt = aead_decrypt,
2028				.givencrypt = aead_givencrypt,
2029				.geniv = "<built-in>",
2030				.ivsize = AES_BLOCK_SIZE,
2031				.maxauthsize = SHA1_DIGEST_SIZE,
2032			}
2033		},
2034		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2035			             DESC_HDR_SEL0_AESU |
2036		                     DESC_HDR_MODE0_AESU_CBC |
2037		                     DESC_HDR_SEL1_MDEUA |
2038		                     DESC_HDR_MODE1_MDEU_INIT |
2039		                     DESC_HDR_MODE1_MDEU_PAD |
2040		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2041	},
2042	{	.type = CRYPTO_ALG_TYPE_AEAD,
2043		.alg.crypto = {
2044			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
2045			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
2046			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2047			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2048			.cra_type = &crypto_aead_type,
2049			.cra_aead = {
2050				.setkey = aead_setkey,
2051				.setauthsize = aead_setauthsize,
2052				.encrypt = aead_encrypt,
2053				.decrypt = aead_decrypt,
2054				.givencrypt = aead_givencrypt,
2055				.geniv = "<built-in>",
2056				.ivsize = DES3_EDE_BLOCK_SIZE,
2057				.maxauthsize = SHA1_DIGEST_SIZE,
2058			}
2059		},
2060		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2061			             DESC_HDR_SEL0_DEU |
2062		                     DESC_HDR_MODE0_DEU_CBC |
2063		                     DESC_HDR_MODE0_DEU_3DES |
2064		                     DESC_HDR_SEL1_MDEUA |
2065		                     DESC_HDR_MODE1_MDEU_INIT |
2066		                     DESC_HDR_MODE1_MDEU_PAD |
2067		                     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2068	},
2069	{	.type = CRYPTO_ALG_TYPE_AEAD,
2070		.alg.crypto = {
2071			.cra_name = "authenc(hmac(sha256),cbc(aes))",
2072			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
2073			.cra_blocksize = AES_BLOCK_SIZE,
2074			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2075			.cra_type = &crypto_aead_type,
2076			.cra_aead = {
2077				.setkey = aead_setkey,
2078				.setauthsize = aead_setauthsize,
2079				.encrypt = aead_encrypt,
2080				.decrypt = aead_decrypt,
2081				.givencrypt = aead_givencrypt,
2082				.geniv = "<built-in>",
2083				.ivsize = AES_BLOCK_SIZE,
2084				.maxauthsize = SHA256_DIGEST_SIZE,
2085			}
2086		},
2087		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2088			             DESC_HDR_SEL0_AESU |
2089		                     DESC_HDR_MODE0_AESU_CBC |
2090		                     DESC_HDR_SEL1_MDEUA |
2091		                     DESC_HDR_MODE1_MDEU_INIT |
2092		                     DESC_HDR_MODE1_MDEU_PAD |
2093		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2094	},
2095	{	.type = CRYPTO_ALG_TYPE_AEAD,
2096		.alg.crypto = {
2097			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
2098			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
2099			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2100			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2101			.cra_type = &crypto_aead_type,
2102			.cra_aead = {
2103				.setkey = aead_setkey,
2104				.setauthsize = aead_setauthsize,
2105				.encrypt = aead_encrypt,
2106				.decrypt = aead_decrypt,
2107				.givencrypt = aead_givencrypt,
2108				.geniv = "<built-in>",
2109				.ivsize = DES3_EDE_BLOCK_SIZE,
2110				.maxauthsize = SHA256_DIGEST_SIZE,
2111			}
2112		},
2113		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2114			             DESC_HDR_SEL0_DEU |
2115		                     DESC_HDR_MODE0_DEU_CBC |
2116		                     DESC_HDR_MODE0_DEU_3DES |
2117		                     DESC_HDR_SEL1_MDEUA |
2118		                     DESC_HDR_MODE1_MDEU_INIT |
2119		                     DESC_HDR_MODE1_MDEU_PAD |
2120		                     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2121	},
2122	{	.type = CRYPTO_ALG_TYPE_AEAD,
2123		.alg.crypto = {
2124			.cra_name = "authenc(hmac(md5),cbc(aes))",
2125			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
2126			.cra_blocksize = AES_BLOCK_SIZE,
2127			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2128			.cra_type = &crypto_aead_type,
2129			.cra_aead = {
2130				.setkey = aead_setkey,
2131				.setauthsize = aead_setauthsize,
2132				.encrypt = aead_encrypt,
2133				.decrypt = aead_decrypt,
2134				.givencrypt = aead_givencrypt,
2135				.geniv = "<built-in>",
2136				.ivsize = AES_BLOCK_SIZE,
2137				.maxauthsize = MD5_DIGEST_SIZE,
2138			}
2139		},
2140		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2141			             DESC_HDR_SEL0_AESU |
2142		                     DESC_HDR_MODE0_AESU_CBC |
2143		                     DESC_HDR_SEL1_MDEUA |
2144		                     DESC_HDR_MODE1_MDEU_INIT |
2145		                     DESC_HDR_MODE1_MDEU_PAD |
2146		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2147	},
2148	{	.type = CRYPTO_ALG_TYPE_AEAD,
2149		.alg.crypto = {
2150			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2151			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
2152			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2153			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
2154			.cra_type = &crypto_aead_type,
2155			.cra_aead = {
2156				.setkey = aead_setkey,
2157				.setauthsize = aead_setauthsize,
2158				.encrypt = aead_encrypt,
2159				.decrypt = aead_decrypt,
2160				.givencrypt = aead_givencrypt,
2161				.geniv = "<built-in>",
2162				.ivsize = DES3_EDE_BLOCK_SIZE,
2163				.maxauthsize = MD5_DIGEST_SIZE,
2164			}
2165		},
2166		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2167			             DESC_HDR_SEL0_DEU |
2168		                     DESC_HDR_MODE0_DEU_CBC |
2169		                     DESC_HDR_MODE0_DEU_3DES |
2170		                     DESC_HDR_SEL1_MDEUA |
2171		                     DESC_HDR_MODE1_MDEU_INIT |
2172		                     DESC_HDR_MODE1_MDEU_PAD |
2173		                     DESC_HDR_MODE1_MDEU_MD5_HMAC,
2174	},
2175	/* ABLKCIPHER algorithms. */
2176	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2177		.alg.crypto = {
2178			.cra_name = "cbc(aes)",
2179			.cra_driver_name = "cbc-aes-talitos",
2180			.cra_blocksize = AES_BLOCK_SIZE,
2181			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2182				     CRYPTO_ALG_ASYNC,
2183			.cra_type = &crypto_ablkcipher_type,
2184			.cra_ablkcipher = {
2185				.setkey = ablkcipher_setkey,
2186				.encrypt = ablkcipher_encrypt,
2187				.decrypt = ablkcipher_decrypt,
2188				.geniv = "eseqiv",
2189				.min_keysize = AES_MIN_KEY_SIZE,
2190				.max_keysize = AES_MAX_KEY_SIZE,
2191				.ivsize = AES_BLOCK_SIZE,
2192			}
2193		},
2194		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2195				     DESC_HDR_SEL0_AESU |
2196				     DESC_HDR_MODE0_AESU_CBC,
2197	},
2198	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2199		.alg.crypto = {
2200			.cra_name = "cbc(des3_ede)",
2201			.cra_driver_name = "cbc-3des-talitos",
2202			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
2203			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2204				     CRYPTO_ALG_ASYNC,
2205			.cra_type = &crypto_ablkcipher_type,
2206			.cra_ablkcipher = {
2207				.setkey = ablkcipher_setkey,
2208				.encrypt = ablkcipher_encrypt,
2209				.decrypt = ablkcipher_decrypt,
2210				.geniv = "eseqiv",
2211				.min_keysize = DES3_EDE_KEY_SIZE,
2212				.max_keysize = DES3_EDE_KEY_SIZE,
2213				.ivsize = DES3_EDE_BLOCK_SIZE,
2214			}
2215		},
2216		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2217			             DESC_HDR_SEL0_DEU |
2218		                     DESC_HDR_MODE0_DEU_CBC |
2219		                     DESC_HDR_MODE0_DEU_3DES,
2220	},
2221	/* AHASH algorithms. */
2222	{	.type = CRYPTO_ALG_TYPE_AHASH,
2223		.alg.hash = {
2224			.init = ahash_init,
2225			.update = ahash_update,
2226			.final = ahash_final,
2227			.finup = ahash_finup,
2228			.digest = ahash_digest,
2229			.halg.digestsize = MD5_DIGEST_SIZE,
2230			.halg.base = {
2231				.cra_name = "md5",
2232				.cra_driver_name = "md5-talitos",
2233				.cra_blocksize = MD5_BLOCK_SIZE,
2234				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2235					     CRYPTO_ALG_ASYNC,
2236				.cra_type = &crypto_ahash_type
2237			}
2238		},
2239		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2240				     DESC_HDR_SEL0_MDEUA |
2241				     DESC_HDR_MODE0_MDEU_MD5,
2242	},
2243	{	.type = CRYPTO_ALG_TYPE_AHASH,
2244		.alg.hash = {
2245			.init = ahash_init,
2246			.update = ahash_update,
2247			.final = ahash_final,
2248			.finup = ahash_finup,
2249			.digest = ahash_digest,
2250			.halg.digestsize = SHA1_DIGEST_SIZE,
2251			.halg.base = {
2252				.cra_name = "sha1",
2253				.cra_driver_name = "sha1-talitos",
2254				.cra_blocksize = SHA1_BLOCK_SIZE,
2255				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2256					     CRYPTO_ALG_ASYNC,
2257				.cra_type = &crypto_ahash_type
2258			}
2259		},
2260		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2261				     DESC_HDR_SEL0_MDEUA |
2262				     DESC_HDR_MODE0_MDEU_SHA1,
2263	},
2264	{	.type = CRYPTO_ALG_TYPE_AHASH,
2265		.alg.hash = {
2266			.init = ahash_init,
2267			.update = ahash_update,
2268			.final = ahash_final,
2269			.finup = ahash_finup,
2270			.digest = ahash_digest,
2271			.halg.digestsize = SHA224_DIGEST_SIZE,
2272			.halg.base = {
2273				.cra_name = "sha224",
2274				.cra_driver_name = "sha224-talitos",
2275				.cra_blocksize = SHA224_BLOCK_SIZE,
2276				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2277					     CRYPTO_ALG_ASYNC,
2278				.cra_type = &crypto_ahash_type
2279			}
2280		},
2281		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2282				     DESC_HDR_SEL0_MDEUA |
2283				     DESC_HDR_MODE0_MDEU_SHA224,
2284	},
2285	{	.type = CRYPTO_ALG_TYPE_AHASH,
2286		.alg.hash = {
2287			.init = ahash_init,
2288			.update = ahash_update,
2289			.final = ahash_final,
2290			.finup = ahash_finup,
2291			.digest = ahash_digest,
2292			.halg.digestsize = SHA256_DIGEST_SIZE,
2293			.halg.base = {
2294				.cra_name = "sha256",
2295				.cra_driver_name = "sha256-talitos",
2296				.cra_blocksize = SHA256_BLOCK_SIZE,
2297				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2298					     CRYPTO_ALG_ASYNC,
2299				.cra_type = &crypto_ahash_type
2300			}
2301		},
2302		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2303				     DESC_HDR_SEL0_MDEUA |
2304				     DESC_HDR_MODE0_MDEU_SHA256,
2305	},
2306	{	.type = CRYPTO_ALG_TYPE_AHASH,
2307		.alg.hash = {
2308			.init = ahash_init,
2309			.update = ahash_update,
2310			.final = ahash_final,
2311			.finup = ahash_finup,
2312			.digest = ahash_digest,
2313			.halg.digestsize = SHA384_DIGEST_SIZE,
2314			.halg.base = {
2315				.cra_name = "sha384",
2316				.cra_driver_name = "sha384-talitos",
2317				.cra_blocksize = SHA384_BLOCK_SIZE,
2318				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2319					     CRYPTO_ALG_ASYNC,
2320				.cra_type = &crypto_ahash_type
2321			}
2322		},
2323		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2324				     DESC_HDR_SEL0_MDEUB |
2325				     DESC_HDR_MODE0_MDEUB_SHA384,
2326	},
2327	{	.type = CRYPTO_ALG_TYPE_AHASH,
2328		.alg.hash = {
2329			.init = ahash_init,
2330			.update = ahash_update,
2331			.final = ahash_final,
2332			.finup = ahash_finup,
2333			.digest = ahash_digest,
2334			.halg.digestsize = SHA512_DIGEST_SIZE,
2335			.halg.base = {
2336				.cra_name = "sha512",
2337				.cra_driver_name = "sha512-talitos",
2338				.cra_blocksize = SHA512_BLOCK_SIZE,
2339				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2340					     CRYPTO_ALG_ASYNC,
2341				.cra_type = &crypto_ahash_type
2342			}
2343		},
2344		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2345				     DESC_HDR_SEL0_MDEUB |
2346				     DESC_HDR_MODE0_MDEUB_SHA512,
2347	},
2348	{	.type = CRYPTO_ALG_TYPE_AHASH,
2349		.alg.hash = {
2350			.init = ahash_init,
2351			.update = ahash_update,
2352			.final = ahash_final,
2353			.finup = ahash_finup,
2354			.digest = ahash_digest,
2355			.setkey = ahash_setkey,
2356			.halg.digestsize = MD5_DIGEST_SIZE,
2357			.halg.base = {
2358				.cra_name = "hmac(md5)",
2359				.cra_driver_name = "hmac-md5-talitos",
2360				.cra_blocksize = MD5_BLOCK_SIZE,
2361				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2362					     CRYPTO_ALG_ASYNC,
2363				.cra_type = &crypto_ahash_type
2364			}
2365		},
2366		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2367				     DESC_HDR_SEL0_MDEUA |
2368				     DESC_HDR_MODE0_MDEU_MD5,
2369	},
2370	{	.type = CRYPTO_ALG_TYPE_AHASH,
2371		.alg.hash = {
2372			.init = ahash_init,
2373			.update = ahash_update,
2374			.final = ahash_final,
2375			.finup = ahash_finup,
2376			.digest = ahash_digest,
2377			.setkey = ahash_setkey,
2378			.halg.digestsize = SHA1_DIGEST_SIZE,
2379			.halg.base = {
2380				.cra_name = "hmac(sha1)",
2381				.cra_driver_name = "hmac-sha1-talitos",
2382				.cra_blocksize = SHA1_BLOCK_SIZE,
2383				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2384					     CRYPTO_ALG_ASYNC,
2385				.cra_type = &crypto_ahash_type
2386			}
2387		},
2388		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2389				     DESC_HDR_SEL0_MDEUA |
2390				     DESC_HDR_MODE0_MDEU_SHA1,
2391	},
2392	{	.type = CRYPTO_ALG_TYPE_AHASH,
2393		.alg.hash = {
2394			.init = ahash_init,
2395			.update = ahash_update,
2396			.final = ahash_final,
2397			.finup = ahash_finup,
2398			.digest = ahash_digest,
2399			.setkey = ahash_setkey,
2400			.halg.digestsize = SHA224_DIGEST_SIZE,
2401			.halg.base = {
2402				.cra_name = "hmac(sha224)",
2403				.cra_driver_name = "hmac-sha224-talitos",
2404				.cra_blocksize = SHA224_BLOCK_SIZE,
2405				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2406					     CRYPTO_ALG_ASYNC,
2407				.cra_type = &crypto_ahash_type
2408			}
2409		},
2410		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2411				     DESC_HDR_SEL0_MDEUA |
2412				     DESC_HDR_MODE0_MDEU_SHA224,
2413	},
2414	{	.type = CRYPTO_ALG_TYPE_AHASH,
2415		.alg.hash = {
2416			.init = ahash_init,
2417			.update = ahash_update,
2418			.final = ahash_final,
2419			.finup = ahash_finup,
2420			.digest = ahash_digest,
2421			.setkey = ahash_setkey,
2422			.halg.digestsize = SHA256_DIGEST_SIZE,
2423			.halg.base = {
2424				.cra_name = "hmac(sha256)",
2425				.cra_driver_name = "hmac-sha256-talitos",
2426				.cra_blocksize = SHA256_BLOCK_SIZE,
2427				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2428					     CRYPTO_ALG_ASYNC,
2429				.cra_type = &crypto_ahash_type
2430			}
2431		},
2432		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2433				     DESC_HDR_SEL0_MDEUA |
2434				     DESC_HDR_MODE0_MDEU_SHA256,
2435	},
2436	{	.type = CRYPTO_ALG_TYPE_AHASH,
2437		.alg.hash = {
2438			.init = ahash_init,
2439			.update = ahash_update,
2440			.final = ahash_final,
2441			.finup = ahash_finup,
2442			.digest = ahash_digest,
2443			.setkey = ahash_setkey,
2444			.halg.digestsize = SHA384_DIGEST_SIZE,
2445			.halg.base = {
2446				.cra_name = "hmac(sha384)",
2447				.cra_driver_name = "hmac-sha384-talitos",
2448				.cra_blocksize = SHA384_BLOCK_SIZE,
2449				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2450					     CRYPTO_ALG_ASYNC,
2451				.cra_type = &crypto_ahash_type
2452			}
2453		},
2454		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2455				     DESC_HDR_SEL0_MDEUB |
2456				     DESC_HDR_MODE0_MDEUB_SHA384,
2457	},
2458	{	.type = CRYPTO_ALG_TYPE_AHASH,
2459		.alg.hash = {
2460			.init = ahash_init,
2461			.update = ahash_update,
2462			.final = ahash_final,
2463			.finup = ahash_finup,
2464			.digest = ahash_digest,
2465			.setkey = ahash_setkey,
2466			.halg.digestsize = SHA512_DIGEST_SIZE,
2467			.halg.base = {
2468				.cra_name = "hmac(sha512)",
2469				.cra_driver_name = "hmac-sha512-talitos",
2470				.cra_blocksize = SHA512_BLOCK_SIZE,
2471				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
2472					     CRYPTO_ALG_ASYNC,
2473				.cra_type = &crypto_ahash_type
2474			}
2475		},
2476		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2477				     DESC_HDR_SEL0_MDEUB |
2478				     DESC_HDR_MODE0_MDEUB_SHA512,
2479	}
2480};
2481
2482struct talitos_crypto_alg {
2483	struct list_head entry;
2484	struct device *dev;
2485	struct talitos_alg_template algt;
2486};
2487
2488static int talitos_cra_init(struct crypto_tfm *tfm)
2489{
2490	struct crypto_alg *alg = tfm->__crt_alg;
2491	struct talitos_crypto_alg *talitos_alg;
2492	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2493	struct talitos_private *priv;
2494
2495	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2496		talitos_alg = container_of(__crypto_ahash_alg(alg),
2497					   struct talitos_crypto_alg,
2498					   algt.alg.hash);
2499	else
2500		talitos_alg = container_of(alg, struct talitos_crypto_alg,
2501					   algt.alg.crypto);
2502
2503	/* update context with ptr to dev */
2504	ctx->dev = talitos_alg->dev;
2505
2506	/* assign SEC channel to tfm in round-robin fashion */
2507	priv = dev_get_drvdata(ctx->dev);
2508	ctx->ch = atomic_inc_return(&priv->last_chan) &
2509		  (priv->num_channels - 1);
2510
2511	/* copy descriptor header template value */
2512	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2513
2514	/* select done notification */
2515	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2516
2517	return 0;
2518}
2519
2520static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2521{
2522	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2523
2524	talitos_cra_init(tfm);
2525
2526	/* random first IV */
2527	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
2528
2529	return 0;
2530}
2531
2532static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2533{
2534	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2535
2536	talitos_cra_init(tfm);
2537
2538	ctx->keylen = 0;
2539	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2540				 sizeof(struct talitos_ahash_req_ctx));
2541
2542	return 0;
2543}
2544
2545/*
2546 * given the alg's descriptor header template, determine whether descriptor
2547 * type and primary/secondary execution units required match the hw
2548 * capabilities description provided in the device tree node.
2549 */
2550static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2551{
2552	struct talitos_private *priv = dev_get_drvdata(dev);
2553	int ret;
2554
2555	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2556	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2557
2558	if (SECONDARY_EU(desc_hdr_template))
2559		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2560		              & priv->exec_units);
2561
2562	return ret;
2563}
2564
2565static int talitos_remove(struct platform_device *ofdev)
2566{
2567	struct device *dev = &ofdev->dev;
2568	struct talitos_private *priv = dev_get_drvdata(dev);
2569	struct talitos_crypto_alg *t_alg, *n;
2570	int i;
2571
2572	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2573		switch (t_alg->algt.type) {
2574		case CRYPTO_ALG_TYPE_ABLKCIPHER:
2575		case CRYPTO_ALG_TYPE_AEAD:
2576			crypto_unregister_alg(&t_alg->algt.alg.crypto);
2577			break;
2578		case CRYPTO_ALG_TYPE_AHASH:
2579			crypto_unregister_ahash(&t_alg->algt.alg.hash);
2580			break;
2581		}
2582		list_del(&t_alg->entry);
2583		kfree(t_alg);
2584	}
2585
2586	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2587		talitos_unregister_rng(dev);
2588
2589	for (i = 0; i < priv->num_channels; i++)
2590		kfree(priv->chan[i].fifo);
2591
2592	kfree(priv->chan);
2593
2594	for (i = 0; i < 2; i++)
2595		if (priv->irq[i]) {
2596			free_irq(priv->irq[i], dev);
2597			irq_dispose_mapping(priv->irq[i]);
2598		}
2599
2600	tasklet_kill(&priv->done_task[0]);
2601	if (priv->irq[1])
2602		tasklet_kill(&priv->done_task[1]);
2603
2604	iounmap(priv->reg);
2605
2606	dev_set_drvdata(dev, NULL);
2607
2608	kfree(priv);
2609
2610	return 0;
2611}
2612
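/*
 * Build a registrable algorithm from a driver_algs template: wire up
 * the type-specific cra_init, reject HMAC when the h/w can't do it,
 * and fall back to s/w-initialized sha256 state for sha224.
 */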
2613static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2614						    struct talitos_alg_template
2615						           *template)
2616{
2617	struct talitos_private *priv = dev_get_drvdata(dev);
2618	struct talitos_crypto_alg *t_alg;
2619	struct crypto_alg *alg;
2620
2621	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2622	if (!t_alg)
2623		return ERR_PTR(-ENOMEM);
2624
2625	t_alg->algt = *template;
2626
2627	switch (t_alg->algt.type) {
2628	case CRYPTO_ALG_TYPE_ABLKCIPHER:
2629		alg = &t_alg->algt.alg.crypto;
2630		alg->cra_init = talitos_cra_init;
2631		break;
2632	case CRYPTO_ALG_TYPE_AEAD:
2633		alg = &t_alg->algt.alg.crypto;
2634		alg->cra_init = talitos_cra_init_aead;
2635		break;
2636	case CRYPTO_ALG_TYPE_AHASH:
2637		alg = &t_alg->algt.alg.hash.halg.base;
2638		alg->cra_init = talitos_cra_init_ahash;
2639		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2640		    !strncmp(alg->cra_name, "hmac", 4)) {
2641			kfree(t_alg);
2642			return ERR_PTR(-ENOTSUPP);
2643		}
2644		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2645		    (!strcmp(alg->cra_name, "sha224") ||
2646		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
2647			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2648			t_alg->algt.desc_hdr_template =
2649					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2650					DESC_HDR_SEL0_MDEUA |
2651					DESC_HDR_MODE0_MDEU_SHA256;
2652		}
2653		break;
2654	default:
2655		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
2656		return ERR_PTR(-EINVAL);
2657	}
2658
2659	alg->cra_module = THIS_MODULE;
2660	alg->cra_priority = TALITOS_CRA_PRIORITY;
2661	alg->cra_alignmask = 0;
2662	alg->cra_ctxsize = sizeof(struct talitos_ctx);
2663	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2664
2665	t_alg->dev = dev;
2666
2667	return t_alg;
2668}
2669
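/*
 * Map the IRQ line(s) from the device tree node.  With a single line,
 * one handler covers all four channels; with two, channels 0/2 and
 * 1/3 get separate handlers.
 */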
2670static int talitos_probe_irq(struct platform_device *ofdev)
2671{
2672	struct device *dev = &ofdev->dev;
2673	struct device_node *np = ofdev->dev.of_node;
2674	struct talitos_private *priv = dev_get_drvdata(dev);
2675	int err;
2676
2677	priv->irq[0] = irq_of_parse_and_map(np, 0);
2678	if (!priv->irq[0]) {
2679		dev_err(dev, "failed to map irq\n");
2680		return -EINVAL;
2681	}
2682
2683	priv->irq[1] = irq_of_parse_and_map(np, 1);
2684
2685	/* get the primary irq line */
2686	if (!priv->irq[1]) {
2687		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
2688				  dev_driver_string(dev), dev);
2689		goto primary_out;
2690	}
2691
2692	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
2693			  dev_driver_string(dev), dev);
2694	if (err)
2695		goto primary_out;
2696
2697	/* get the secondary irq line */
2698	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
2699			  dev_driver_string(dev), dev);
2700	if (err) {
2701		dev_err(dev, "failed to request secondary irq\n");
2702		irq_dispose_mapping(priv->irq[1]);
2703		priv->irq[1] = 0;
2704	}
2705
2706	return err;
2707
2708primary_out:
2709	if (err) {
2710		dev_err(dev, "failed to request primary irq\n");
2711		irq_dispose_mapping(priv->irq[0]);
2712		priv->irq[0] = 0;
2713	}
2714
2715	return err;
2716}
2717
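/*
 * Probe: map registers, read the SEC's capabilities from the device
 * tree, set up channels and IRQs, then register the RNG and every
 * algorithm the hardware supports.
 */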
2718static int talitos_probe(struct platform_device *ofdev)
2719{
2720	struct device *dev = &ofdev->dev;
2721	struct device_node *np = ofdev->dev.of_node;
2722	struct talitos_private *priv;
2723	const unsigned int *prop;
2724	int i, err;
2725
2726	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2727	if (!priv)
2728		return -ENOMEM;
2729
2730	dev_set_drvdata(dev, priv);
2731
2732	priv->ofdev = ofdev;
2733
2734	spin_lock_init(&priv->reg_lock);
2735
2736	err = talitos_probe_irq(ofdev);
2737	if (err)
2738		goto err_out;
2739
2740	if (!priv->irq[1]) {
2741		tasklet_init(&priv->done_task[0], talitos_done_4ch,
2742			     (unsigned long)dev);
2743	} else {
2744		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
2745			     (unsigned long)dev);
2746		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
2747			     (unsigned long)dev);
2748	}
2749
2750	INIT_LIST_HEAD(&priv->alg_list);
2751
2752	priv->reg = of_iomap(np, 0);
2753	if (!priv->reg) {
2754		dev_err(dev, "failed to of_iomap\n");
2755		err = -ENOMEM;
2756		goto err_out;
2757	}
2758
2759	/* get SEC version capabilities from device tree */
2760	prop = of_get_property(np, "fsl,num-channels", NULL);
2761	if (prop)
2762		priv->num_channels = *prop;
2763
2764	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2765	if (prop)
2766		priv->chfifo_len = *prop;
2767
2768	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2769	if (prop)
2770		priv->exec_units = *prop;
2771
2772	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2773	if (prop)
2774		priv->desc_types = *prop;
2775
2776	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2777	    !priv->exec_units || !priv->desc_types) {
2778		dev_err(dev, "invalid property data in device tree node\n");
2779		err = -EINVAL;
2780		goto err_out;
2781	}
2782
2783	if (of_device_is_compatible(np, "fsl,sec3.0"))
2784		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2785
2786	if (of_device_is_compatible(np, "fsl,sec2.1"))
2787		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2788				  TALITOS_FTR_SHA224_HWINIT |
2789				  TALITOS_FTR_HMAC_OK;
2790
2791	priv->chan = kcalloc(priv->num_channels,
2792			     sizeof(struct talitos_channel), GFP_KERNEL);
2793	if (!priv->chan) {
2794		dev_err(dev, "failed to allocate channel management space\n");
2795		err = -ENOMEM;
2796		goto err_out;
2797	}
2798
2799	for (i = 0; i < priv->num_channels; i++) {
2800		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
2801		if (!priv->irq[1] || !(i & 1))
2802			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
2803	}
2804
2805	for (i = 0; i < priv->num_channels; i++) {
2806		spin_lock_init(&priv->chan[i].head_lock);
2807		spin_lock_init(&priv->chan[i].tail_lock);
2808	}
2809
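	/* round the fifo depth up to a power of 2 so ring indices can wrap by masking */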
2810	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2811
2812	for (i = 0; i < priv->num_channels; i++) {
2813		priv->chan[i].fifo = kcalloc(priv->fifo_len,
2814					     sizeof(struct talitos_request), GFP_KERNEL);
2815		if (!priv->chan[i].fifo) {
2816			dev_err(dev, "failed to allocate request fifo %d\n", i);
2817			err = -ENOMEM;
2818			goto err_out;
2819		}
2820	}
2821
2822	for (i = 0; i < priv->num_channels; i++)
2823		atomic_set(&priv->chan[i].submit_count,
2824			   -(priv->chfifo_len - 1));
2825
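	/* the SEC generates 36-bit DMA addresses */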
2826	dma_set_mask(dev, DMA_BIT_MASK(36));
2827
2828	/* reset and initialize the h/w */
2829	err = init_device(dev);
2830	if (err) {
2831		dev_err(dev, "failed to initialize device\n");
2832		goto err_out;
2833	}
2834
2835	/* register the RNG, if available */
2836	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
2837		err = talitos_register_rng(dev);
2838		if (err) {
2839			dev_err(dev, "failed to register hwrng: %d\n", err);
2840			goto err_out;
2841		} else
2842			dev_info(dev, "hwrng\n");
2843	}
2844
2845	/* register crypto algorithms the device supports */
2846	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
2847		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
2848			struct talitos_crypto_alg *t_alg;
2849			char *name = NULL;
2850
2851			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
2852			if (IS_ERR(t_alg)) {
2853				err = PTR_ERR(t_alg);
2854				if (err == -ENOTSUPP)
2855					continue;
2856				goto err_out;
2857			}
2858
2859			switch (t_alg->algt.type) {
2860			case CRYPTO_ALG_TYPE_ABLKCIPHER:
2861			case CRYPTO_ALG_TYPE_AEAD:
2862				err = crypto_register_alg(
2863						&t_alg->algt.alg.crypto);
2864				name = t_alg->algt.alg.crypto.cra_driver_name;
2865				break;
2866			case CRYPTO_ALG_TYPE_AHASH:
2867				err = crypto_register_ahash(
2868						&t_alg->algt.alg.hash);
2869				name =
2870				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
2871				break;
2872			}
2873			if (err) {
2874				dev_err(dev, "%s alg registration failed\n",
2875					name);
2876				kfree(t_alg);
2877			} else
2878				list_add_tail(&t_alg->entry, &priv->alg_list);
2879		}
2880	}
2881	if (!list_empty(&priv->alg_list))
2882		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
2883			 (char *)of_get_property(np, "compatible", NULL));
2884
2885	return 0;
2886
2887err_out:
2888	talitos_remove(ofdev);
2889
2890	return err;
2891}
2892
2893static const struct of_device_id talitos_match[] = {
2894	{
2895		.compatible = "fsl,sec2.0",
2896	},
2897	{},
2898};
2899MODULE_DEVICE_TABLE(of, talitos_match);
2900
2901static struct platform_driver talitos_driver = {
2902	.driver = {
2903		.name = "talitos",
2904		.owner = THIS_MODULE,
2905		.of_match_table = talitos_match,
2906	},
2907	.probe = talitos_probe,
2908	.remove = talitos_remove,
2909};
2910
2911module_platform_driver(talitos_driver);
2912
2913MODULE_LICENSE("GPL");
2914MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
2915MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");