   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * K3 SA2UL crypto accelerator driver
   4 *
   5 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
   6 *
   7 * Authors:	Keerthy
   8 *		Vitaly Andrianov
   9 *		Tero Kristo
  10 */
  11#include <linux/bitfield.h>
  12#include <linux/clk.h>
  13#include <linux/dma-mapping.h>
  14#include <linux/dmaengine.h>
  15#include <linux/dmapool.h>
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/of.h>
  19#include <linux/of_platform.h>
  20#include <linux/platform_device.h>
  21#include <linux/pm_runtime.h>
  22
  23#include <crypto/aes.h>
  24#include <crypto/authenc.h>
  25#include <crypto/des.h>
  26#include <crypto/internal/aead.h>
  27#include <crypto/internal/hash.h>
  28#include <crypto/internal/skcipher.h>
  29#include <crypto/scatterwalk.h>
  30#include <crypto/sha1.h>
  31#include <crypto/sha2.h>
  32
  33#include "sa2ul.h"
  34
  35/* Byte offset for key in encryption security context */
  36#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
  37/* Byte offset for Aux-1 in encryption security context */
  38#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
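/*
 * Illustrative breakdown of the offsets above (the 4 reserved bytes are
 * an assumption about padding, not taken from the hardware spec): byte 0
 * of the encryption context holds the mode selector and bytes 1..27 the
 * mode control instructions, so
 *
 *	SC_ENC_KEY_OFFSET  = 1 + 27 + 4      = 32
 *	SC_ENC_AUX1_OFFSET = 1 + 27 + 4 + 32 = 64
 */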
  39
  40#define SA_CMDL_UPD_ENC         0x0001
  41#define SA_CMDL_UPD_AUTH        0x0002
  42#define SA_CMDL_UPD_ENC_IV      0x0004
  43#define SA_CMDL_UPD_AUTH_IV     0x0008
  44#define SA_CMDL_UPD_AUX_KEY     0x0010
  45
  46#define SA_AUTH_SUBKEY_LEN	16
  47#define SA_CMDL_PAYLOAD_LENGTH_MASK	0xFFFF
  48#define SA_CMDL_SOP_BYPASS_LEN_MASK	0xFF000000
  49
  50#define MODE_CONTROL_BYTES	27
  51#define SA_HASH_PROCESSING	0
  52#define SA_CRYPTO_PROCESSING	0
  53#define SA_UPLOAD_HASH_TO_TLR	BIT(6)
  54
  55#define SA_SW0_FLAGS_MASK	0xF0000
  56#define SA_SW0_CMDL_INFO_MASK	0x1F00000
  57#define SA_SW0_CMDL_PRESENT	BIT(4)
  58#define SA_SW0_ENG_ID_MASK	0x3E000000
  59#define SA_SW0_DEST_INFO_PRESENT	BIT(30)
  60#define SA_SW2_EGRESS_LENGTH		0xFF000000
  61#define SA_BASIC_HASH		0x10
  62
  63#define SHA256_DIGEST_WORDS    8
  64/* Make 32-bit word from 4 bytes */
  65#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
  66				   ((b2) << 8) | (b3))
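/*
 * Worked example: SA_MK_U32(0xDE, 0xAD, 0xBE, 0xEF) == 0xDEADBEEF, i.e.
 * the first argument ends up in the most significant byte.
 */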
  67
  68/* size of SCCTL structure in bytes */
  69#define SA_SCCTL_SZ 16
  70
  71/* Max Authentication tag size */
  72#define SA_MAX_AUTH_TAG_SZ 64
  73
  74enum sa_algo_id {
  75	SA_ALG_CBC_AES = 0,
  76	SA_ALG_EBC_AES,
  77	SA_ALG_CBC_DES3,
  78	SA_ALG_ECB_DES3,
  79	SA_ALG_SHA1,
  80	SA_ALG_SHA256,
  81	SA_ALG_SHA512,
  82	SA_ALG_AUTHENC_SHA1_AES,
  83	SA_ALG_AUTHENC_SHA256_AES,
  84};
  85
  86struct sa_match_data {
  87	u8 priv;
  88	u8 priv_id;
  89	u32 supported_algos;
  90};
  91
  92static struct device *sa_k3_dev;
  93
  94/**
  95 * struct sa_cmdl_cfg - Command label configuration descriptor
  96 * @aalg: authentication algorithm ID
  97 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
  98 * @auth_eng_id: Authentication Engine ID
  99 * @iv_size: Initialization Vector size
 100 * @akey: Authentication key
 101 * @akey_len: Authentication key length
 102 * @enc: True, if this is an encode request
 103 */
 104struct sa_cmdl_cfg {
 105	int aalg;
 106	u8 enc_eng_id;
 107	u8 auth_eng_id;
 108	u8 iv_size;
 109	const u8 *akey;
 110	u16 akey_len;
 111	bool enc;
 112};
 113
 114/**
 115 * struct algo_data - Crypto algorithm specific data
 116 * @enc_eng: Encryption engine info structure
 117 * @auth_eng: Authentication engine info structure
 118 * @auth_ctrl: Authentication control word
 119 * @hash_size: Size of digest
 120 * @iv_idx: iv index in psdata
 121 * @iv_out_size: iv out size
 122 * @ealg_id: Encryption Algorithm ID
 123 * @aalg_id: Authentication algorithm ID
 124 * @mci_enc: Mode Control Instruction for Encryption algorithm
 125 * @mci_dec: Mode Control Instruction for Decryption
 126 * @inv_key: Whether the encryption algorithm demands key inversion
 127 * @ctx: Pointer to the algorithm context
 128 * @keyed_mac: Whether the authentication algorithm has key
 129 * @prep_iopad: Function pointer to generate intermediate ipad/opad
 130 */
 131struct algo_data {
 132	struct sa_eng_info enc_eng;
 133	struct sa_eng_info auth_eng;
 134	u8 auth_ctrl;
 135	u8 hash_size;
 136	u8 iv_idx;
 137	u8 iv_out_size;
 138	u8 ealg_id;
 139	u8 aalg_id;
 140	u8 *mci_enc;
 141	u8 *mci_dec;
 142	bool inv_key;
 143	struct sa_tfm_ctx *ctx;
 144	bool keyed_mac;
 145	void (*prep_iopad)(struct algo_data *algo, const u8 *key,
 146			   u16 key_sz, __be32 *ipad, __be32 *opad);
 147};
 148
 149/**
 150 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
 151 * @type: Type of the crypto algorithm.
 152 * @alg: Union of crypto algorithm definitions.
 153 * @registered: Flag indicating if the crypto algorithm is already registered
 154 */
 155struct sa_alg_tmpl {
 156	u32 type;		/* CRYPTO_ALG_TYPE from <linux/crypto.h> */
 157	union {
 158		struct skcipher_alg skcipher;
 159		struct ahash_alg ahash;
 160		struct aead_alg aead;
 161	} alg;
 162	bool registered;
 163};
 164
 165/**
 166 * struct sa_mapped_sg: scatterlist information for tx and rx
 167 * @mapped: Set to true if the @sgt is mapped
 168 * @dir: mapping direction used for @sgt
 169 * @split_sg: Set if the sg is split and needs to be freed up
 170 * @static_sg: Static scatterlist entry for overriding data
 171 * @sgt: scatterlist table for DMA API use
 172 */
 173struct sa_mapped_sg {
 174	bool mapped;
 175	enum dma_data_direction dir;
 176	struct scatterlist static_sg;
 177	struct scatterlist *split_sg;
 178	struct sg_table sgt;
 179};
 180/**
 181 * struct sa_rx_data: RX Packet miscellaneous data place holder
 182 * @req: crypto request data pointer
 183 * @ddev: pointer to the DMA device
 184 * @tx_in: dma_async_tx_descriptor pointer for rx channel
 185 * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
 186 * @enc: Flag indicating either encryption or decryption
 187 * @enc_iv_size: Initialisation vector size
 188 * @iv_idx: Initialisation vector index
 189 */
 190struct sa_rx_data {
 191	void *req;
 192	struct device *ddev;
 193	struct dma_async_tx_descriptor *tx_in;
 194	struct sa_mapped_sg mapped_sg[2];
 195	u8 enc;
 196	u8 enc_iv_size;
 197	u8 iv_idx;
 198};
 199
 200/**
 201 * struct sa_req: SA request definition
 202 * @dev: device for the request
  203 * @size: total data to be transmitted via DMA
 204 * @enc_offset: offset of cipher data
 205 * @enc_size: data to be passed to cipher engine
 206 * @enc_iv: cipher IV
 207 * @auth_offset: offset of the authentication data
 208 * @auth_size: size of the authentication data
 209 * @auth_iv: authentication IV
 210 * @type: algorithm type for the request
 211 * @cmdl: command label pointer
 212 * @base: pointer to the base request
 213 * @ctx: pointer to the algorithm context data
 214 * @enc: true if this is an encode request
 215 * @src: source data
 216 * @dst: destination data
 217 * @callback: DMA callback for the request
 218 * @mdata_size: metadata size passed to DMA
 219 */
 220struct sa_req {
 221	struct device *dev;
 222	u16 size;
 223	u8 enc_offset;
 224	u16 enc_size;
 225	u8 *enc_iv;
 226	u8 auth_offset;
 227	u16 auth_size;
 228	u8 *auth_iv;
 229	u32 type;
 230	u32 *cmdl;
 231	struct crypto_async_request *base;
 232	struct sa_tfm_ctx *ctx;
 233	bool enc;
 234	struct scatterlist *src;
 235	struct scatterlist *dst;
 236	dma_async_tx_callback callback;
 237	u16 mdata_size;
 238};
 239
 240/*
 241 * Mode Control Instructions for various Key lengths 128, 192, 256
 242 * For CBC (Cipher Block Chaining) mode for encryption
 243 */
 244static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
 245	{	0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
 246		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 247		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 248	{	0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
 249		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 250		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 251	{	0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
 252		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 253		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 254};
 255
 256/*
 257 * Mode Control Instructions for various Key lengths 128, 192, 256
 258 * For CBC (Cipher Block Chaining) mode for decryption
 259 */
 260static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
 261	{	0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 262		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 263		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 264	{	0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 265		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 266		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 267	{	0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 268		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 269		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 270};
 271
 272/*
 273 * Mode Control Instructions for various Key lengths 128, 192, 256
 274 * For CBC (Cipher Block Chaining) mode for encryption
 275 */
 276static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
 277	{	0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
 278		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 279		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 280	{	0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
 281		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 282		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 283	{	0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
 284		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 285		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 286};
 287
 288/*
 289 * Mode Control Instructions for various Key lengths 128, 192, 256
 290 * For CBC (Cipher Block Chaining) mode for decryption
 291 */
 292static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
 293	{	0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 294		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 295		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 296	{	0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 297		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 298		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 299	{	0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 300		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 301		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 302};
 303
 304/*
 305 * Mode Control Instructions for various Key lengths 128, 192, 256
 306 * For ECB (Electronic Code Book) mode for encryption
 307 */
 308static u8 mci_ecb_enc_array[3][27] = {
 309	{	0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 310		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 311		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 312	{	0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 313		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 314		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 315	{	0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 316		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 317		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 318};
 319
 320/*
 321 * Mode Control Instructions for various Key lengths 128, 192, 256
 322 * For ECB (Electronic Code Book) mode for decryption
 323 */
 324static u8 mci_ecb_dec_array[3][27] = {
 325	{	0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 326		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 327		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 328	{	0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 329		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 330		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 331	{	0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 332		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 333		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 334};
 335
 336/*
 337 * Mode Control Instructions for DES algorithm
 338 * For CBC (Cipher Block Chaining) mode and ECB mode
 339 * encryption and for decryption respectively
 340 */
 341static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
 342	0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
 343	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 344	0x00, 0x00, 0x00,
 345};
 346
 347static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
 348	0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
 349	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 350	0x00, 0x00, 0x00,
 351};
 352
 353static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
 354	0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
 355	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 356	0x00, 0x00, 0x00,
 357};
 358
 359static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
 360	0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
 361	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 362	0x00, 0x00, 0x00,
 363};
 364
  365/*
  366 * Perform 16-byte (128-bit) swizzling.
  367 * The SA2UL expects the security context to be in little-endian
  368 * format; the bus width is 128 bits (16 bytes), hence swap
  369 * 16 bytes at a time, from higher to lower address.
  370 */
 371static void sa_swiz_128(u8 *in, u16 len)
 372{
 373	u8 data[16];
 374	int i, j;
 375
 376	for (i = 0; i < len; i += 16) {
 377		memcpy(data, &in[i], 16);
 378		for (j = 0; j < 16; j++)
 379			in[i + j] = data[15 - j];
 380	}
 381}
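/*
 * Worked example: sa_swiz_128(buf, 16) turns
 *	00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 * into
 *	0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * Each aligned 16-byte chunk is reversed independently, so callers must
 * pass a length that is a multiple of 16.
 */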
 382
  383/* Prepare the ipad and opad from the key as per HMAC (RFC 2104) step 1 */
 384static void prepare_kipad(u8 *k_ipad, const u8 *key, u16 key_sz)
 385{
 386	int i;
 387
 388	for (i = 0; i < key_sz; i++)
 389		k_ipad[i] = key[i] ^ 0x36;
 390
 391	/* Instead of XOR with 0 */
 392	for (; i < SHA1_BLOCK_SIZE; i++)
 393		k_ipad[i] = 0x36;
 394}
 395
 396static void prepare_kopad(u8 *k_opad, const u8 *key, u16 key_sz)
 397{
 398	int i;
 399
 400	for (i = 0; i < key_sz; i++)
 401		k_opad[i] = key[i] ^ 0x5c;
 402
 403	/* Instead of XOR with 0 */
 404	for (; i < SHA1_BLOCK_SIZE; i++)
 405		k_opad[i] = 0x5c;
 406}
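/*
 * Both helpers implement HMAC (RFC 2104) key padding: the key is XORed
 * with the 0x36/0x5c constants and the remainder of the block is filled
 * with the constant itself (a zero key byte XORed with the constant).
 * Worked example for a two-byte key { 0x0b, 0x0b } and a 64-byte block:
 *
 *	k_ipad = { 0x3d, 0x3d, 0x36, 0x36, ..., 0x36 }
 *	k_opad = { 0x57, 0x57, 0x5c, 0x5c, ..., 0x5c }
 */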
 407
 408static void sa_export_shash(void *state, struct shash_desc *hash,
 409			    int digest_size, __be32 *out)
 410{
 411	struct sha1_state *sha1;
 412	struct sha256_state *sha256;
 413	u32 *result;
 414
 415	switch (digest_size) {
 416	case SHA1_DIGEST_SIZE:
 417		sha1 = state;
 418		result = sha1->state;
 419		break;
 420	case SHA256_DIGEST_SIZE:
 421		sha256 = state;
 422		result = sha256->state;
 423		break;
 424	default:
 425		dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
 426			digest_size);
 427		return;
 428	}
 429
 430	crypto_shash_export(hash, state);
 431
 432	cpu_to_be32_array(out, result, digest_size / 4);
 433}
 434
 435static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
 436			      u16 key_sz, __be32 *ipad, __be32 *opad)
 437{
 438	SHASH_DESC_ON_STACK(shash, data->ctx->shash);
 439	int block_size = crypto_shash_blocksize(data->ctx->shash);
 440	int digest_size = crypto_shash_digestsize(data->ctx->shash);
 441	union {
 442		struct sha1_state sha1;
 443		struct sha256_state sha256;
 444		u8 k_pad[SHA1_BLOCK_SIZE];
 445	} sha;
 446
 447	shash->tfm = data->ctx->shash;
 448
 449	prepare_kipad(sha.k_pad, key, key_sz);
 450
 451	crypto_shash_init(shash);
 452	crypto_shash_update(shash, sha.k_pad, block_size);
 453	sa_export_shash(&sha, shash, digest_size, ipad);
 454
 455	prepare_kopad(sha.k_pad, key, key_sz);
 456
 457	crypto_shash_init(shash);
 458	crypto_shash_update(shash, sha.k_pad, block_size);
 459
 460	sa_export_shash(&sha, shash, digest_size, opad);
 461
 462	memzero_explicit(&sha, sizeof(sha));
 463}
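/*
 * Sketch of what the precomputation above provides: with the exported
 * intermediate states of H(K ^ ipad) and H(K ^ opad), the engine can
 * later complete
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * without rehashing the padded key blocks for every request.
 */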
 464
 465/* Derive the inverse key used in AES-CBC decryption operation */
 466static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
 467{
 468	struct crypto_aes_ctx ctx;
 469	int key_pos;
 470
 471	if (aes_expandkey(&ctx, key, key_sz)) {
 472		dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
 473		return -EINVAL;
 474	}
 475
  476	/* Workaround to get the right inverse for AES_KEYSIZE_192 size keys */
 477	if (key_sz == AES_KEYSIZE_192) {
 478		ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
 479		ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
 480	}
 481
  482	/* Based on crypto_aes_expand_key logic */
 483	switch (key_sz) {
 484	case AES_KEYSIZE_128:
 485	case AES_KEYSIZE_192:
 486		key_pos = key_sz + 24;
 487		break;
 488
 489	case AES_KEYSIZE_256:
 490		key_pos = key_sz + 24 - 4;
 491		break;
 492
 493	default:
 494		dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
 495		return -EINVAL;
 496	}
 497
 498	memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
 499	return 0;
 500}
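/*
 * Worked example, assuming the engine wants the tail of the expanded key
 * schedule: for AES-128 the schedule holds 4 * (10 + 1) = 44 32-bit
 * words, and key_pos = 16 + 24 = 40 selects words 40..43, i.e. the last
 * round key, which is where decryption starts.
 */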
 501
 502/* Set Security context for the encryption engine */
 503static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
 504			 u8 enc, u8 *sc_buf)
 505{
 506	const u8 *mci = NULL;
 507
 508	/* Set Encryption mode selector to crypto processing */
 509	sc_buf[0] = SA_CRYPTO_PROCESSING;
 510
 511	if (enc)
 512		mci = ad->mci_enc;
 513	else
 514		mci = ad->mci_dec;
 515	/* Set the mode control instructions in security context */
 516	if (mci)
 517		memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
 518
 519	/* For AES-CBC decryption get the inverse key */
 520	if (ad->inv_key && !enc) {
 521		if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
 522			return -EINVAL;
 523	/* For all other cases: key is used */
 524	} else {
 525		memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
 526	}
 527
 528	return 0;
 529}
 530
 531/* Set Security context for the authentication engine */
 532static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
 533			   u8 *sc_buf)
 534{
 535	__be32 *ipad = (void *)(sc_buf + 32);
 536	__be32 *opad = (void *)(sc_buf + 64);
 537
 538	/* Set Authentication mode selector to hash processing */
 539	sc_buf[0] = SA_HASH_PROCESSING;
 540	/* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
 541	sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
 542	sc_buf[1] |= ad->auth_ctrl;
 543
 544	/* Copy the keys or ipad/opad */
 545	if (ad->keyed_mac)
 546		ad->prep_iopad(ad, key, key_sz, ipad, opad);
 547	else {
 548		/* basic hash */
 549		sc_buf[1] |= SA_BASIC_HASH;
 550	}
 551}
 552
 553static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
 554{
 555	int j;
 556
 557	for (j = 0; j < ((size16) ? 4 : 2); j++) {
 558		*out = cpu_to_be32(*((u32 *)iv));
 559		iv += 4;
 560		out++;
 561	}
 562}
 563
 564/* Format general command label */
 565static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
 566			      struct sa_cmdl_upd_info *upd_info)
 567{
 568	u8 enc_offset = 0, auth_offset = 0, total = 0;
 569	u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
 570	u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
 571	u32 *word_ptr = (u32 *)cmdl;
 572	int i;
 573
 574	/* Clear the command label */
 575	memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
 576
  577	/* Initialize the command update structure */
 578	memzero_explicit(upd_info, sizeof(*upd_info));
 579
 580	if (cfg->enc_eng_id && cfg->auth_eng_id) {
 581		if (cfg->enc) {
 582			auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
 583			enc_next_eng = cfg->auth_eng_id;
 584
 585			if (cfg->iv_size)
 586				auth_offset += cfg->iv_size;
 587		} else {
 588			enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
 589			auth_next_eng = cfg->enc_eng_id;
 590		}
 591	}
 592
 593	if (cfg->enc_eng_id) {
 594		upd_info->flags |= SA_CMDL_UPD_ENC;
 595		upd_info->enc_size.index = enc_offset >> 2;
 596		upd_info->enc_offset.index = upd_info->enc_size.index + 1;
 597		/* Encryption command label */
 598		cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
 599
 600		/* Encryption modes requiring IV */
 601		if (cfg->iv_size) {
 602			upd_info->flags |= SA_CMDL_UPD_ENC_IV;
 603			upd_info->enc_iv.index =
 604				(enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
 605			upd_info->enc_iv.size = cfg->iv_size;
 606
 607			cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
 608				SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
 609
 610			cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
 611				(SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
 612			total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
 613		} else {
 614			cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
 615						SA_CMDL_HEADER_SIZE_BYTES;
 616			total += SA_CMDL_HEADER_SIZE_BYTES;
 617		}
 618	}
 619
 620	if (cfg->auth_eng_id) {
 621		upd_info->flags |= SA_CMDL_UPD_AUTH;
 622		upd_info->auth_size.index = auth_offset >> 2;
 623		upd_info->auth_offset.index = upd_info->auth_size.index + 1;
 624		cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
 625		cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
 626			SA_CMDL_HEADER_SIZE_BYTES;
 627		total += SA_CMDL_HEADER_SIZE_BYTES;
 628	}
 629
 630	total = roundup(total, 8);
 631
 632	for (i = 0; i < total / 4; i++)
 633		word_ptr[i] = swab32(word_ptr[i]);
 634
 635	return total;
 636}
 637
 638/* Update Command label */
 639static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
 640				  struct sa_cmdl_upd_info *upd_info)
 641{
 642	int i = 0, j;
 643
 644	if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
 645		cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
 646		cmdl[upd_info->enc_size.index] |= req->enc_size;
 647		cmdl[upd_info->enc_offset.index] &=
 648						~SA_CMDL_SOP_BYPASS_LEN_MASK;
 649		cmdl[upd_info->enc_offset.index] |=
 650			FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
 651				   req->enc_offset);
 652
 653		if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
 654			__be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
 655			u32 *enc_iv = (u32 *)req->enc_iv;
 656
 657			for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
 658				data[j] = cpu_to_be32(*enc_iv);
 659				enc_iv++;
 660			}
 661		}
 662	}
 663
 664	if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
 665		cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
 666		cmdl[upd_info->auth_size.index] |= req->auth_size;
 667		cmdl[upd_info->auth_offset.index] &=
 668			~SA_CMDL_SOP_BYPASS_LEN_MASK;
 669		cmdl[upd_info->auth_offset.index] |=
 670			FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
 671				   req->auth_offset);
 672		if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
 673			sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
 674				   req->auth_iv,
 675				   (upd_info->auth_iv.size > 8));
 676		}
 677		if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
 678			int offset = (req->auth_size & 0xF) ? 4 : 0;
 679
 680			memcpy(&cmdl[upd_info->aux_key_info.index],
 681			       &upd_info->aux_key[offset], 16);
 682		}
 683	}
 684}
 685
 686/* Format SWINFO words to be sent to SA */
 687static
 688void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
 689		   u8 cmdl_present, u8 cmdl_offset, u8 flags,
 690		   u8 hash_size, u32 *swinfo)
 691{
 692	swinfo[0] = sc_id;
 693	swinfo[0] |= FIELD_PREP(SA_SW0_FLAGS_MASK, flags);
 694	if (likely(cmdl_present))
 695		swinfo[0] |= FIELD_PREP(SA_SW0_CMDL_INFO_MASK,
 696					cmdl_offset | SA_SW0_CMDL_PRESENT);
 697	swinfo[0] |= FIELD_PREP(SA_SW0_ENG_ID_MASK, eng_id);
 698
 699	swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
 700	swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
 701	swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
 702	swinfo[2] |= FIELD_PREP(SA_SW2_EGRESS_LENGTH, hash_size);
 703}
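/*
 * Worked example of the packing above, with illustrative values only:
 * sc_id = 1, flags = 1, cmdl_present = 1, cmdl_offset = 0, eng_id = 2,
 * hash_size = 16 and sc_phys = 0x980000000 give
 *
 *	swinfo[0] = 0x1 | (1 << 16) | (BIT(4) << 20) | (2 << 25) | BIT(30)
 *		  = 0x45010001
 *	swinfo[1] = 0x80000000
 *	swinfo[2] = 0x9 | (16 << 24) = 0x10000009
 */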
 704
 705/* Dump the security context */
 706static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
 707{
 708#ifdef DEBUG
 709	dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
 710	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
 711		       16, 1, buf, SA_CTX_MAX_SZ, false);
 712#endif
 713}
 714
 715static
 716int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
 717	       const u8 *enc_key, u16 enc_key_sz,
 718	       const u8 *auth_key, u16 auth_key_sz,
 719	       struct algo_data *ad, u8 enc, u32 *swinfo)
 720{
 721	int enc_sc_offset = 0;
 722	int auth_sc_offset = 0;
 723	u8 *sc_buf = ctx->sc;
 724	u16 sc_id = ctx->sc_id;
 725	u8 first_engine = 0;
 726
 727	memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
 728
 729	if (ad->auth_eng.eng_id) {
 730		if (enc)
 731			first_engine = ad->enc_eng.eng_id;
 732		else
 733			first_engine = ad->auth_eng.eng_id;
 734
 735		enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
 736		auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
 737		sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
 738		if (!ad->hash_size)
 739			return -EINVAL;
 740		ad->hash_size = roundup(ad->hash_size, 8);
 741
 742	} else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
 743		enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
 744		first_engine = ad->enc_eng.eng_id;
 745		sc_buf[1] = SA_SCCTL_FE_ENC;
 746		ad->hash_size = ad->iv_out_size;
 747	}
 748
 749	/* SCCTL Owner info: 0=host, 1=CP_ACE */
 750	sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
 751	memcpy(&sc_buf[2], &sc_id, 2);
 752	sc_buf[4] = 0x0;
 753	sc_buf[5] = match_data->priv_id;
 754	sc_buf[6] = match_data->priv;
 755	sc_buf[7] = 0x0;
 756
 757	/* Prepare context for encryption engine */
 758	if (ad->enc_eng.sc_size) {
 759		if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
 760				  &sc_buf[enc_sc_offset]))
 761			return -EINVAL;
 762	}
 763
 764	/* Prepare context for authentication engine */
 765	if (ad->auth_eng.sc_size)
 766		sa_set_sc_auth(ad, auth_key, auth_key_sz,
 767			       &sc_buf[auth_sc_offset]);
 768
 769	/* Set the ownership of context to CP_ACE */
 770	sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
 771
 772	/* swizzle the security context */
 773	sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
 774
 775	sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
 776		      SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
 777
 778	sa_dump_sc(sc_buf, ctx->sc_phys);
 779
 780	return 0;
 781}
 782
 783/* Free the per direction context memory */
 784static void sa_free_ctx_info(struct sa_ctx_info *ctx,
 785			     struct sa_crypto_data *data)
 786{
 787	unsigned long bn;
 788
 789	bn = ctx->sc_id - data->sc_id_start;
 790	spin_lock(&data->scid_lock);
 791	__clear_bit(bn, data->ctx_bm);
 792	data->sc_id--;
 793	spin_unlock(&data->scid_lock);
 794
 795	if (ctx->sc) {
 796		dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
 797		ctx->sc = NULL;
 798	}
 799}
 800
 801static int sa_init_ctx_info(struct sa_ctx_info *ctx,
 802			    struct sa_crypto_data *data)
 803{
 804	unsigned long bn;
 805	int err;
 806
 807	spin_lock(&data->scid_lock);
 808	bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
 809	__set_bit(bn, data->ctx_bm);
 810	data->sc_id++;
 811	spin_unlock(&data->scid_lock);
 812
 813	ctx->sc_id = (u16)(data->sc_id_start + bn);
 814
 815	ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
 816	if (!ctx->sc) {
 817		dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
 818		err = -ENOMEM;
 819		goto scid_rollback;
 820	}
 821
 822	return 0;
 823
 824scid_rollback:
 825	spin_lock(&data->scid_lock);
 826	__clear_bit(bn, data->ctx_bm);
 827	data->sc_id--;
 828	spin_unlock(&data->scid_lock);
 829
 830	return err;
 831}
 832
 833static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
 834{
 835	struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 836	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
 837
 838	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
 839		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
 840		ctx->dec.sc_id, &ctx->dec.sc_phys);
 841
 842	sa_free_ctx_info(&ctx->enc, data);
 843	sa_free_ctx_info(&ctx->dec, data);
 844
 845	crypto_free_skcipher(ctx->fallback.skcipher);
 846}
 847
 848static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
 849{
 850	struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 851	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
 852	const char *name = crypto_tfm_alg_name(&tfm->base);
 853	struct crypto_skcipher *child;
 854	int ret;
 855
 856	memzero_explicit(ctx, sizeof(*ctx));
 857	ctx->dev_data = data;
 858
 859	ret = sa_init_ctx_info(&ctx->enc, data);
 860	if (ret)
 861		return ret;
 862	ret = sa_init_ctx_info(&ctx->dec, data);
 863	if (ret) {
 864		sa_free_ctx_info(&ctx->enc, data);
 865		return ret;
 866	}
 867
 868	child = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 869
 870	if (IS_ERR(child)) {
 871		dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
 872		return PTR_ERR(child);
 873	}
 874
 875	ctx->fallback.skcipher = child;
 876	crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
 877					 sizeof(struct skcipher_request));
 878
 879	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
 880		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
 881		ctx->dec.sc_id, &ctx->dec.sc_phys);
 882	return 0;
 883}
 884
 885static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 886			    unsigned int keylen, struct algo_data *ad)
 887{
 888	struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 889	struct crypto_skcipher *child = ctx->fallback.skcipher;
 890	int cmdl_len;
 891	struct sa_cmdl_cfg cfg;
 892	int ret;
 893
 894	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
 895	    keylen != AES_KEYSIZE_256)
 896		return -EINVAL;
 897
 898	ad->enc_eng.eng_id = SA_ENG_ID_EM1;
 899	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
 900
 901	memzero_explicit(&cfg, sizeof(cfg));
 902	cfg.enc_eng_id = ad->enc_eng.eng_id;
 903	cfg.iv_size = crypto_skcipher_ivsize(tfm);
 904
 905	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
 906	crypto_skcipher_set_flags(child, tfm->base.crt_flags &
 907					 CRYPTO_TFM_REQ_MASK);
 908	ret = crypto_skcipher_setkey(child, key, keylen);
 909	if (ret)
 910		return ret;
 911
 912	/* Setup Encryption Security Context & Command label template */
 913	if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
 914		       ad, 1, &ctx->enc.epib[1]))
 915		goto badkey;
 916
 917	cmdl_len = sa_format_cmdl_gen(&cfg,
 918				      (u8 *)ctx->enc.cmdl,
 919				      &ctx->enc.cmdl_upd_info);
 920	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
 921		goto badkey;
 922
 923	ctx->enc.cmdl_size = cmdl_len;
 924
 925	/* Setup Decryption Security Context & Command label template */
 926	if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
 927		       ad, 0, &ctx->dec.epib[1]))
 928		goto badkey;
 929
 930	cfg.enc_eng_id = ad->enc_eng.eng_id;
 931	cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
 932				      &ctx->dec.cmdl_upd_info);
 933
 934	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
 935		goto badkey;
 936
 937	ctx->dec.cmdl_size = cmdl_len;
 938	ctx->iv_idx = ad->iv_idx;
 939
 940	return 0;
 941
 942badkey:
 943	dev_err(sa_k3_dev, "%s: badkey\n", __func__);
 944	return -EINVAL;
 945}
 946
 947static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
 948			     unsigned int keylen)
 949{
 950	struct algo_data ad = { 0 };
 951	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
 952	int key_idx = (keylen >> 3) - 2;
 953
 954	if (key_idx >= 3)
 955		return -EINVAL;
 956
 957	ad.mci_enc = mci_cbc_enc_array[key_idx];
 958	ad.mci_dec = mci_cbc_dec_array[key_idx];
 959	ad.inv_key = true;
 960	ad.ealg_id = SA_EALG_ID_AES_CBC;
 961	ad.iv_idx = 4;
 962	ad.iv_out_size = 16;
 963
 964	return sa_cipher_setkey(tfm, key, keylen, &ad);
 965}
 966
 967static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
 968			     unsigned int keylen)
 969{
 970	struct algo_data ad = { 0 };
 971	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
 972	int key_idx = (keylen >> 3) - 2;
 973
 974	if (key_idx >= 3)
 975		return -EINVAL;
 976
 977	ad.mci_enc = mci_ecb_enc_array[key_idx];
 978	ad.mci_dec = mci_ecb_dec_array[key_idx];
 979	ad.inv_key = true;
 980	ad.ealg_id = SA_EALG_ID_AES_ECB;
 981
 982	return sa_cipher_setkey(tfm, key, keylen, &ad);
 983}
 984
 985static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
 986			      unsigned int keylen)
 987{
 988	struct algo_data ad = { 0 };
 989
 990	ad.mci_enc = mci_cbc_3des_enc_array;
 991	ad.mci_dec = mci_cbc_3des_dec_array;
 992	ad.ealg_id = SA_EALG_ID_3DES_CBC;
 993	ad.iv_idx = 6;
 994	ad.iv_out_size = 8;
 995
 996	return sa_cipher_setkey(tfm, key, keylen, &ad);
 997}
 998
 999static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
1000			      unsigned int keylen)
1001{
1002	struct algo_data ad = { 0 };
1003
1004	ad.mci_enc = mci_ecb_3des_enc_array;
1005	ad.mci_dec = mci_ecb_3des_dec_array;
1006
1007	return sa_cipher_setkey(tfm, key, keylen, &ad);
1008}
1009
1010static void sa_sync_from_device(struct sa_rx_data *rxd)
1011{
1012	struct sg_table *sgt;
1013
1014	if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
1015		sgt = &rxd->mapped_sg[0].sgt;
1016	else
1017		sgt = &rxd->mapped_sg[1].sgt;
1018
1019	dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
1020}
1021
1022static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
1023{
1024	int i;
1025
1026	for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
1027		struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
1028
1029		if (mapped_sg->mapped) {
1030			dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
1031					  mapped_sg->dir, 0);
1032			kfree(mapped_sg->split_sg);
1033		}
1034	}
1035
1036	kfree(rxd);
1037}
1038
1039static void sa_aes_dma_in_callback(void *data)
1040{
1041	struct sa_rx_data *rxd = data;
1042	struct skcipher_request *req;
1043	u32 *result;
1044	__be32 *mdptr;
1045	size_t ml, pl;
1046	int i;
1047
1048	sa_sync_from_device(rxd);
1049	req = container_of(rxd->req, struct skcipher_request, base);
1050
1051	if (req->iv) {
1052		mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
1053							       &ml);
1054		result = (u32 *)req->iv;
1055
1056		for (i = 0; i < (rxd->enc_iv_size / 4); i++)
1057			result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
1058	}
1059
1060	sa_free_sa_rx_data(rxd);
1061
1062	skcipher_request_complete(req, 0);
1063}
1064
1065static void
1066sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
1067{
1068	u32 *out, *in;
1069	int i;
1070
1071	for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
1072		*out++ = *in++;
1073
1074	mdptr[4] = (0xFFFF << 16);
1075	for (out = &mdptr[5], in = psdata, i = 0;
1076	     i < pslen / sizeof(u32); i++)
1077		*out++ = *in++;
1078}
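/*
 * Resulting metadata layout, as a sketch (assuming a four-word EPIB as
 * set up via sa_set_swinfo() into epib[1..3]):
 *
 *	mdptr[0..3]  EPIB words
 *	mdptr[4]     0xFFFF0000 marker
 *	mdptr[5..]   PS data, i.e. the command label words
 */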
1079
1080static int sa_run(struct sa_req *req)
1081{
1082	struct sa_rx_data *rxd;
1083	gfp_t gfp_flags;
1084	u32 cmdl[SA_MAX_CMDL_WORDS];
1085	struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
1086	struct device *ddev;
1087	struct dma_chan *dma_rx;
1088	int sg_nents, src_nents, dst_nents;
1089	struct scatterlist *src, *dst;
1090	size_t pl, ml, split_size;
1091	struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
1092	int ret;
1093	struct dma_async_tx_descriptor *tx_out;
1094	u32 *mdptr;
1095	bool diff_dst;
1096	enum dma_data_direction dir_src;
1097	struct sa_mapped_sg *mapped_sg;
1098
1099	gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1100		GFP_KERNEL : GFP_ATOMIC;
1101
1102	rxd = kzalloc(sizeof(*rxd), gfp_flags);
1103	if (!rxd)
1104		return -ENOMEM;
1105
1106	if (req->src != req->dst) {
1107		diff_dst = true;
1108		dir_src = DMA_TO_DEVICE;
1109	} else {
1110		diff_dst = false;
1111		dir_src = DMA_BIDIRECTIONAL;
1112	}
1113
1114	/*
1115	 * SA2UL has an interesting feature where the receive DMA channel
1116	 * is selected based on the data passed to the engine. Within the
1117	 * transition range, there is also a space where it is impossible
1118	 * to determine where the data will end up, and this should be
 1119	 * avoided. This is handled by the SW fallback mechanism in
 1120	 * the individual algorithm implementations.
1121	 */
1122	if (req->size >= 256)
1123		dma_rx = pdata->dma_rx2;
1124	else
1125		dma_rx = pdata->dma_rx1;
1126
1127	ddev = dmaengine_get_dma_device(pdata->dma_tx);
1128	rxd->ddev = ddev;
1129
1130	memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
1131
1132	sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
1133
1134	if (req->type != CRYPTO_ALG_TYPE_AHASH) {
1135		if (req->enc)
1136			req->type |=
1137				(SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
1138		else
1139			req->type |=
1140				(SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
1141	}
1142
1143	cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
1144
1145	/*
1146	 * Map the packets, first we check if the data fits into a single
1147	 * sg entry and use that if possible. If it does not fit, we check
1148	 * if we need to do sg_split to align the scatterlist data on the
1149	 * actual data size being processed by the crypto engine.
1150	 */
1151	src = req->src;
1152	sg_nents = sg_nents_for_len(src, req->size);
1153
1154	split_size = req->size;
1155
1156	mapped_sg = &rxd->mapped_sg[0];
1157	if (sg_nents == 1 && split_size <= req->src->length) {
1158		src = &mapped_sg->static_sg;
1159		src_nents = 1;
1160		sg_init_table(src, 1);
1161		sg_set_page(src, sg_page(req->src), split_size,
1162			    req->src->offset);
1163
1164		mapped_sg->sgt.sgl = src;
1165		mapped_sg->sgt.orig_nents = src_nents;
1166		ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1167		if (ret) {
1168			kfree(rxd);
1169			return ret;
1170		}
1171
1172		mapped_sg->dir = dir_src;
1173		mapped_sg->mapped = true;
1174	} else {
1175		mapped_sg->sgt.sgl = req->src;
1176		mapped_sg->sgt.orig_nents = sg_nents;
1177		ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1178		if (ret) {
1179			kfree(rxd);
1180			return ret;
1181		}
1182
1183		mapped_sg->dir = dir_src;
1184		mapped_sg->mapped = true;
1185
1186		ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
1187			       &split_size, &src, &src_nents, gfp_flags);
1188		if (ret) {
1189			src_nents = mapped_sg->sgt.nents;
1190			src = mapped_sg->sgt.sgl;
1191		} else {
1192			mapped_sg->split_sg = src;
1193		}
1194	}
1195
1196	dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
1197
1198	if (!diff_dst) {
1199		dst_nents = src_nents;
1200		dst = src;
1201	} else {
1202		dst_nents = sg_nents_for_len(req->dst, req->size);
1203		mapped_sg = &rxd->mapped_sg[1];
1204
1205		if (dst_nents == 1 && split_size <= req->dst->length) {
1206			dst = &mapped_sg->static_sg;
1207			dst_nents = 1;
1208			sg_init_table(dst, 1);
1209			sg_set_page(dst, sg_page(req->dst), split_size,
1210				    req->dst->offset);
1211
1212			mapped_sg->sgt.sgl = dst;
1213			mapped_sg->sgt.orig_nents = dst_nents;
1214			ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1215					      DMA_FROM_DEVICE, 0);
1216			if (ret)
1217				goto err_cleanup;
1218
1219			mapped_sg->dir = DMA_FROM_DEVICE;
1220			mapped_sg->mapped = true;
1221		} else {
1222			mapped_sg->sgt.sgl = req->dst;
1223			mapped_sg->sgt.orig_nents = dst_nents;
1224			ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1225					      DMA_FROM_DEVICE, 0);
1226			if (ret)
1227				goto err_cleanup;
1228
1229			mapped_sg->dir = DMA_FROM_DEVICE;
1230			mapped_sg->mapped = true;
1231
1232			ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
1233				       0, 1, &split_size, &dst, &dst_nents,
1234				       gfp_flags);
1235			if (ret) {
1236				dst_nents = mapped_sg->sgt.nents;
1237				dst = mapped_sg->sgt.sgl;
1238			} else {
1239				mapped_sg->split_sg = dst;
1240			}
1241		}
1242	}
1243
1244	rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
1245					     DMA_DEV_TO_MEM,
1246					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1247	if (!rxd->tx_in) {
1248		dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
1249		ret = -EINVAL;
1250		goto err_cleanup;
1251	}
1252
1253	rxd->req = (void *)req->base;
1254	rxd->enc = req->enc;
1255	rxd->iv_idx = req->ctx->iv_idx;
1256	rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
1257	rxd->tx_in->callback = req->callback;
1258	rxd->tx_in->callback_param = rxd;
1259
1260	tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
1261					 src_nents, DMA_MEM_TO_DEV,
1262					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1263
1264	if (!tx_out) {
1265		dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
1266		ret = -EINVAL;
1267		goto err_cleanup;
1268	}
1269
1270	/*
1271	 * Prepare metadata for DMA engine. This essentially describes the
1272	 * crypto algorithm to be used, data sizes, different keys etc.
1273	 */
1274	mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
1275
1276	sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
1277				   sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
1278			   sa_ctx->epib);
1279
1280	ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
1281	dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
1282
1283	dmaengine_submit(tx_out);
1284	dmaengine_submit(rxd->tx_in);
1285
1286	dma_async_issue_pending(dma_rx);
1287	dma_async_issue_pending(pdata->dma_tx);
1288
1289	return -EINPROGRESS;
1290
1291err_cleanup:
1292	sa_free_sa_rx_data(rxd);
1293
1294	return ret;
1295}
1296
1297static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
1298{
1299	struct sa_tfm_ctx *ctx =
1300	    crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1301	struct crypto_alg *alg = req->base.tfm->__crt_alg;
1302	struct sa_req sa_req = { 0 };
1303
1304	if (!req->cryptlen)
1305		return 0;
1306
1307	if (req->cryptlen % alg->cra_blocksize)
1308		return -EINVAL;
1309
1310	/* Use SW fallback if the data size is not supported */
1311	if (req->cryptlen > SA_MAX_DATA_SZ ||
1312	    (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
1313	     req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
1314		struct skcipher_request *subreq = skcipher_request_ctx(req);
1315
1316		skcipher_request_set_tfm(subreq, ctx->fallback.skcipher);
1317		skcipher_request_set_callback(subreq, req->base.flags,
1318					      req->base.complete,
1319					      req->base.data);
1320		skcipher_request_set_crypt(subreq, req->src, req->dst,
1321					   req->cryptlen, req->iv);
1322		if (enc)
1323			return crypto_skcipher_encrypt(subreq);
1324		else
1325			return crypto_skcipher_decrypt(subreq);
1326	}
1327
1328	sa_req.size = req->cryptlen;
1329	sa_req.enc_size = req->cryptlen;
1330	sa_req.src = req->src;
1331	sa_req.dst = req->dst;
1332	sa_req.enc_iv = iv;
1333	sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
1334	sa_req.enc = enc;
1335	sa_req.callback = sa_aes_dma_in_callback;
1336	sa_req.mdata_size = 44;
1337	sa_req.base = &req->base;
1338	sa_req.ctx = ctx;
1339
1340	return sa_run(&sa_req);
1341}
1342
1343static int sa_encrypt(struct skcipher_request *req)
1344{
1345	return sa_cipher_run(req, req->iv, 1);
1346}
1347
1348static int sa_decrypt(struct skcipher_request *req)
1349{
1350	return sa_cipher_run(req, req->iv, 0);
1351}
1352
1353static void sa_sha_dma_in_callback(void *data)
1354{
1355	struct sa_rx_data *rxd = data;
1356	struct ahash_request *req;
1357	struct crypto_ahash *tfm;
1358	unsigned int authsize;
1359	int i;
1360	size_t ml, pl;
1361	u32 *result;
1362	__be32 *mdptr;
1363
1364	sa_sync_from_device(rxd);
1365	req = container_of(rxd->req, struct ahash_request, base);
1366	tfm = crypto_ahash_reqtfm(req);
1367	authsize = crypto_ahash_digestsize(tfm);
1368
1369	mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1370	result = (u32 *)req->result;
1371
1372	for (i = 0; i < (authsize / 4); i++)
1373		result[i] = be32_to_cpu(mdptr[i + 4]);
1374
1375	sa_free_sa_rx_data(rxd);
1376
1377	ahash_request_complete(req, 0);
1378}
1379
1380static int zero_message_process(struct ahash_request *req)
1381{
1382	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1383	int sa_digest_size = crypto_ahash_digestsize(tfm);
1384
1385	switch (sa_digest_size) {
1386	case SHA1_DIGEST_SIZE:
1387		memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
1388		break;
1389	case SHA256_DIGEST_SIZE:
1390		memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
1391		break;
1392	case SHA512_DIGEST_SIZE:
1393		memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
1394		break;
1395	default:
1396		return -EINVAL;
1397	}
1398
1399	return 0;
1400}
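/*
 * The *_zero_message_hash constants are the well-known digests of the
 * empty message, e.g.
 *
 *	SHA-1:   da39a3ee 5e6b4b0d 3255bfef 95601890 afd80709
 *	SHA-256: e3b0c442 98fc1c14 9afbf4c8 996fb924 27ae41e4 649b934c
 *	         a495991b 7852b855
 */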
1401
1402static int sa_sha_run(struct ahash_request *req)
1403{
1404	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1405	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1406	struct sa_req sa_req = { 0 };
1407	size_t auth_len;
1408
1409	auth_len = req->nbytes;
1410
1411	if (!auth_len)
1412		return zero_message_process(req);
1413
1414	if (auth_len > SA_MAX_DATA_SZ ||
1415	    (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
1416	     auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
1417		struct ahash_request *subreq = &rctx->fallback_req;
1418		int ret = 0;
1419
1420		ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1421		subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1422
1423		crypto_ahash_init(subreq);
1424
1425		subreq->nbytes = auth_len;
1426		subreq->src = req->src;
1427		subreq->result = req->result;
1428
1429		ret |= crypto_ahash_update(subreq);
1430
1431		subreq->nbytes = 0;
1432
1433		ret |= crypto_ahash_final(subreq);
1434
1435		return ret;
1436	}
1437
1438	sa_req.size = auth_len;
1439	sa_req.auth_size = auth_len;
1440	sa_req.src = req->src;
1441	sa_req.dst = req->src;
1442	sa_req.enc = true;
1443	sa_req.type = CRYPTO_ALG_TYPE_AHASH;
1444	sa_req.callback = sa_sha_dma_in_callback;
1445	sa_req.mdata_size = 28;
1446	sa_req.ctx = ctx;
1447	sa_req.base = &req->base;
1448
1449	return sa_run(&sa_req);
1450}
1451
 1452static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
1453{
1454	int bs = crypto_shash_blocksize(ctx->shash);
1455	int cmdl_len;
1456	struct sa_cmdl_cfg cfg;
1457
1458	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1459	ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1460	ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1461
1462	memset(ctx->authkey, 0, bs);
1463	memset(&cfg, 0, sizeof(cfg));
1464	cfg.aalg = ad->aalg_id;
1465	cfg.enc_eng_id = ad->enc_eng.eng_id;
1466	cfg.auth_eng_id = ad->auth_eng.eng_id;
1467	cfg.iv_size = 0;
1468	cfg.akey = NULL;
1469	cfg.akey_len = 0;
1470
1471	ctx->dev_data = dev_get_drvdata(sa_k3_dev);
1472	/* Setup Encryption Security Context & Command label template */
1473	if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
1474		       ad, 0, &ctx->enc.epib[1]))
1475		goto badkey;
1476
1477	cmdl_len = sa_format_cmdl_gen(&cfg,
1478				      (u8 *)ctx->enc.cmdl,
1479				      &ctx->enc.cmdl_upd_info);
1480	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1481		goto badkey;
1482
1483	ctx->enc.cmdl_size = cmdl_len;
1484
1485	return 0;
1486
1487badkey:
1488	dev_err(sa_k3_dev, "%s: badkey\n", __func__);
1489	return -EINVAL;
1490}
1491
1492static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1493{
1494	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1495	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1496	int ret;
1497
1498	memset(ctx, 0, sizeof(*ctx));
1499	ctx->dev_data = data;
1500	ret = sa_init_ctx_info(&ctx->enc, data);
1501	if (ret)
1502		return ret;
1503
1504	if (alg_base) {
1505		ctx->shash = crypto_alloc_shash(alg_base, 0,
1506						CRYPTO_ALG_NEED_FALLBACK);
1507		if (IS_ERR(ctx->shash)) {
1508			dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1509				alg_base);
1510			return PTR_ERR(ctx->shash);
1511		}
1512		/* for fallback */
1513		ctx->fallback.ahash =
1514			crypto_alloc_ahash(alg_base, 0,
1515					   CRYPTO_ALG_NEED_FALLBACK);
1516		if (IS_ERR(ctx->fallback.ahash)) {
1517			dev_err(ctx->dev_data->dev,
1518				"Could not load fallback driver\n");
1519			return PTR_ERR(ctx->fallback.ahash);
1520		}
1521	}
1522
1523	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1524		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1525		ctx->dec.sc_id, &ctx->dec.sc_phys);
1526
1527	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1528				 sizeof(struct sa_sha_req_ctx) +
1529				 crypto_ahash_reqsize(ctx->fallback.ahash));
1530
1531	return 0;
1532}
1533
1534static int sa_sha_digest(struct ahash_request *req)
1535{
1536	return sa_sha_run(req);
1537}
1538
1539static int sa_sha_init(struct ahash_request *req)
1540{
1541	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1542	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1543	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1544
1545	dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1546		crypto_ahash_digestsize(tfm), rctx);
1547
1548	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1549	rctx->fallback_req.base.flags =
1550		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1551
1552	return crypto_ahash_init(&rctx->fallback_req);
1553}
1554
1555static int sa_sha_update(struct ahash_request *req)
1556{
1557	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1558	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1559	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1560
1561	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1562	rctx->fallback_req.base.flags =
1563		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1564	rctx->fallback_req.nbytes = req->nbytes;
1565	rctx->fallback_req.src = req->src;
1566
1567	return crypto_ahash_update(&rctx->fallback_req);
1568}
1569
1570static int sa_sha_final(struct ahash_request *req)
1571{
1572	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1573	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1574	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1575
1576	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1577	rctx->fallback_req.base.flags =
1578		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1579	rctx->fallback_req.result = req->result;
1580
1581	return crypto_ahash_final(&rctx->fallback_req);
1582}
1583
1584static int sa_sha_finup(struct ahash_request *req)
1585{
1586	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1587	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1588	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1589
1590	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1591	rctx->fallback_req.base.flags =
1592		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1593
1594	rctx->fallback_req.nbytes = req->nbytes;
1595	rctx->fallback_req.src = req->src;
1596	rctx->fallback_req.result = req->result;
1597
1598	return crypto_ahash_finup(&rctx->fallback_req);
1599}
1600
1601static int sa_sha_import(struct ahash_request *req, const void *in)
1602{
1603	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1604	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1605	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1606
1607	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1608	rctx->fallback_req.base.flags = req->base.flags &
1609		CRYPTO_TFM_REQ_MAY_SLEEP;
1610
1611	return crypto_ahash_import(&rctx->fallback_req, in);
1612}
1613
1614static int sa_sha_export(struct ahash_request *req, void *out)
1615{
1616	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1617	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1618	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1619	struct ahash_request *subreq = &rctx->fallback_req;
1620
1621	ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1622	subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1623
1624	return crypto_ahash_export(subreq, out);
1625}
1626
1627static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1628{
1629	struct algo_data ad = { 0 };
1630	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1631
1632	sa_sha_cra_init_alg(tfm, "sha1");
1633
1634	ad.aalg_id = SA_AALG_ID_SHA1;
1635	ad.hash_size = SHA1_DIGEST_SIZE;
1636	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1637
1638	sa_sha_setup(ctx, &ad);
1639
1640	return 0;
1641}
1642
1643static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1644{
1645	struct algo_data ad = { 0 };
1646	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1647
1648	sa_sha_cra_init_alg(tfm, "sha256");
1649
1650	ad.aalg_id = SA_AALG_ID_SHA2_256;
1651	ad.hash_size = SHA256_DIGEST_SIZE;
1652	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1653
1654	sa_sha_setup(ctx, &ad);
1655
1656	return 0;
1657}
1658
1659static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1660{
1661	struct algo_data ad = { 0 };
1662	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1663
1664	sa_sha_cra_init_alg(tfm, "sha512");
1665
1666	ad.aalg_id = SA_AALG_ID_SHA2_512;
1667	ad.hash_size = SHA512_DIGEST_SIZE;
1668	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
1669
1670	sa_sha_setup(ctx, &ad);
1671
1672	return 0;
1673}
1674
1675static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1676{
1677	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1678	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1679
1680	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1681		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1682		ctx->dec.sc_id, &ctx->dec.sc_phys);
1683
1684	if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1685		sa_free_ctx_info(&ctx->enc, data);
1686
1687	crypto_free_shash(ctx->shash);
1688	crypto_free_ahash(ctx->fallback.ahash);
1689}
1690
1691static void sa_aead_dma_in_callback(void *data)
1692{
1693	struct sa_rx_data *rxd = data;
1694	struct aead_request *req;
1695	struct crypto_aead *tfm;
1696	unsigned int start;
1697	unsigned int authsize;
1698	u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1699	size_t pl, ml;
1700	int i;
1701	int err = 0;
1702	u32 *mdptr;
1703
1704	sa_sync_from_device(rxd);
1705	req = container_of(rxd->req, struct aead_request, base);
1706	tfm = crypto_aead_reqtfm(req);
1707	start = req->assoclen + req->cryptlen;
1708	authsize = crypto_aead_authsize(tfm);
1709
1710	mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1711	for (i = 0; i < (authsize / 4); i++)
1712		mdptr[i + 4] = swab32(mdptr[i + 4]);
1713
1714	if (rxd->enc) {
1715		scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1716					 1);
1717	} else {
1718		start -= authsize;
1719		scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1720					 0);
1721
1722		err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1723	}
1724
1725	sa_free_sa_rx_data(rxd);
1726
1727	aead_request_complete(req, err);
1728}
1729
1730static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1731			    const char *fallback)
1732{
1733	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1734	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1735	int ret;
1736
1737	memzero_explicit(ctx, sizeof(*ctx));
1738	ctx->dev_data = data;
1739
1740	ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1741	if (IS_ERR(ctx->shash)) {
1742		dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1743		return PTR_ERR(ctx->shash);
1744	}
1745
1746	ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
1747					       CRYPTO_ALG_NEED_FALLBACK);
1748
1749	if (IS_ERR(ctx->fallback.aead)) {
1750		dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
1751			fallback);
1752		return PTR_ERR(ctx->fallback.aead);
1753	}
1754
1755	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1756				crypto_aead_reqsize(ctx->fallback.aead));
1757
1758	ret = sa_init_ctx_info(&ctx->enc, data);
1759	if (ret)
1760		return ret;
1761
1762	ret = sa_init_ctx_info(&ctx->dec, data);
1763	if (ret) {
1764		sa_free_ctx_info(&ctx->enc, data);
1765		return ret;
1766	}
1767
1768	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1769		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1770		ctx->dec.sc_id, &ctx->dec.sc_phys);
1771
1772	return ret;
1773}
1774
1775static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1776{
1777	return sa_cra_init_aead(tfm, "sha1",
1778				"authenc(hmac(sha1-ce),cbc(aes-ce))");
1779}
1780
1781static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1782{
1783	return sa_cra_init_aead(tfm, "sha256",
1784				"authenc(hmac(sha256-ce),cbc(aes-ce))");
1785}
1786
1787static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1788{
1789	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1790	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1791
1792	crypto_free_shash(ctx->shash);
1793	crypto_free_aead(ctx->fallback.aead);
1794
1795	sa_free_ctx_info(&ctx->enc, data);
1796	sa_free_ctx_info(&ctx->dec, data);
1797}
1798
1799/* AEAD algorithm configuration interface function */
1800static int sa_aead_setkey(struct crypto_aead *authenc,
1801			  const u8 *key, unsigned int keylen,
1802			  struct algo_data *ad)
1803{
1804	struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1805	struct crypto_authenc_keys keys;
1806	int cmdl_len;
1807	struct sa_cmdl_cfg cfg;
1808	int key_idx;
1809
1810	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1811		return -EINVAL;
1812
1813	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
1814	key_idx = (keys.enckeylen >> 3) - 2;
1815	if (key_idx >= 3)
1816		return -EINVAL;
1817
1818	ad->ctx = ctx;
1819	ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1820	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1821	ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1822	ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1823	ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1824	ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1825	ad->inv_key = true;
1826	ad->keyed_mac = true;
1827	ad->ealg_id = SA_EALG_ID_AES_CBC;
1828	ad->prep_iopad = sa_prepare_iopads;
1829
1830	memset(&cfg, 0, sizeof(cfg));
1831	cfg.enc = true;
1832	cfg.aalg = ad->aalg_id;
1833	cfg.enc_eng_id = ad->enc_eng.eng_id;
1834	cfg.auth_eng_id = ad->auth_eng.eng_id;
1835	cfg.iv_size = crypto_aead_ivsize(authenc);
1836	cfg.akey = keys.authkey;
1837	cfg.akey_len = keys.authkeylen;
1838
1839	/* Setup Encryption Security Context & Command label template */
1840	if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
1841		       keys.enckeylen, keys.authkey, keys.authkeylen,
1842		       ad, 1, &ctx->enc.epib[1]))
1843		return -EINVAL;
1844
1845	cmdl_len = sa_format_cmdl_gen(&cfg,
1846				      (u8 *)ctx->enc.cmdl,
1847				      &ctx->enc.cmdl_upd_info);
1848	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1849		return -EINVAL;
1850
1851	ctx->enc.cmdl_size = cmdl_len;
1852
1853	/* Setup Decryption Security Context & Command label template */
1854	if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
1855		       keys.enckeylen, keys.authkey, keys.authkeylen,
1856		       ad, 0, &ctx->dec.epib[1]))
1857		return -EINVAL;
1858
1859	cfg.enc = false;
1860	cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1861				      &ctx->dec.cmdl_upd_info);
1862
1863	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1864		return -EINVAL;
1865
1866	ctx->dec.cmdl_size = cmdl_len;
1867
1868	crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1869	crypto_aead_set_flags(ctx->fallback.aead,
1870			      crypto_aead_get_flags(authenc) &
1871			      CRYPTO_TFM_REQ_MASK);
1872
1873	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1874}
1875
1876static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1877{
1878	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1879
1880	return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1881}
1882
1883static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1884				   const u8 *key, unsigned int keylen)
1885{
1886	struct algo_data ad = { 0 };
1887
1888	ad.ealg_id = SA_EALG_ID_AES_CBC;
1889	ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1890	ad.hash_size = SHA1_DIGEST_SIZE;
1891	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1892
1893	return sa_aead_setkey(authenc, key, keylen, &ad);
1894}
1895
1896static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1897				     const u8 *key, unsigned int keylen)
1898{
1899	struct algo_data ad = { 0 };
1900
1901	ad.ealg_id = SA_EALG_ID_AES_CBC;
1902	ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1903	ad.hash_size = SHA256_DIGEST_SIZE;
1904	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1905
1906	return sa_aead_setkey(authenc, key, keylen, &ad);
1907}
1908
1909static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1910{
1911	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1912	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1913	struct sa_req sa_req = { 0 };
1914	size_t auth_size, enc_size;
1915
1916	enc_size = req->cryptlen;
1917	auth_size = req->assoclen + req->cryptlen;
1918
1919	if (!enc) {
1920		enc_size -= crypto_aead_authsize(tfm);
1921		auth_size -= crypto_aead_authsize(tfm);
1922	}
1923
1924	if (auth_size > SA_MAX_DATA_SZ ||
1925	    (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1926	     auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1927		struct aead_request *subreq = aead_request_ctx(req);
1928		int ret;
1929
1930		aead_request_set_tfm(subreq, ctx->fallback.aead);
1931		aead_request_set_callback(subreq, req->base.flags,
1932					  req->base.complete, req->base.data);
1933		aead_request_set_crypt(subreq, req->src, req->dst,
1934				       req->cryptlen, req->iv);
1935		aead_request_set_ad(subreq, req->assoclen);
1936
1937		ret = enc ? crypto_aead_encrypt(subreq) :
1938			crypto_aead_decrypt(subreq);
1939		return ret;
1940	}
1941
1942	sa_req.enc_offset = req->assoclen;
1943	sa_req.enc_size = enc_size;
1944	sa_req.auth_size = auth_size;
1945	sa_req.size = auth_size;
1946	sa_req.enc_iv = iv;
1947	sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1948	sa_req.enc = enc;
1949	sa_req.callback = sa_aead_dma_in_callback;
1950	sa_req.mdata_size = 52;
1951	sa_req.base = &req->base;
1952	sa_req.ctx = ctx;
1953	sa_req.src = req->src;
1954	sa_req.dst = req->dst;
1955
1956	return sa_run(&sa_req);
1957}
1958
1959/* AEAD algorithm encrypt interface function */
1960static int sa_aead_encrypt(struct aead_request *req)
1961{
1962	return sa_aead_run(req, req->iv, 1);
1963}
1964
1965/* AEAD algorithm decrypt interface function */
1966static int sa_aead_decrypt(struct aead_request *req)
1967{
1968	return sa_aead_run(req, req->iv, 0);
1969}
1970
1971static struct sa_alg_tmpl sa_algs[] = {
1972	[SA_ALG_CBC_AES] = {
1973		.type = CRYPTO_ALG_TYPE_SKCIPHER,
1974		.alg.skcipher = {
1975			.base.cra_name		= "cbc(aes)",
1976			.base.cra_driver_name	= "cbc-aes-sa2ul",
1977			.base.cra_priority	= 30000,
1978			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
1979						  CRYPTO_ALG_KERN_DRIVER_ONLY |
1980						  CRYPTO_ALG_ASYNC |
1981						  CRYPTO_ALG_NEED_FALLBACK,
1982			.base.cra_blocksize	= AES_BLOCK_SIZE,
1983			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
1984			.base.cra_module	= THIS_MODULE,
1985			.init			= sa_cipher_cra_init,
1986			.exit			= sa_cipher_cra_exit,
1987			.min_keysize		= AES_MIN_KEY_SIZE,
1988			.max_keysize		= AES_MAX_KEY_SIZE,
1989			.ivsize			= AES_BLOCK_SIZE,
1990			.setkey			= sa_aes_cbc_setkey,
1991			.encrypt		= sa_encrypt,
1992			.decrypt		= sa_decrypt,
1993		}
1994	},
1995	[SA_ALG_EBC_AES] = {
1996		.type = CRYPTO_ALG_TYPE_SKCIPHER,
1997		.alg.skcipher = {
1998			.base.cra_name		= "ecb(aes)",
1999			.base.cra_driver_name	= "ecb-aes-sa2ul",
2000			.base.cra_priority	= 30000,
2001			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
2002						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2003						  CRYPTO_ALG_ASYNC |
2004						  CRYPTO_ALG_NEED_FALLBACK,
2005			.base.cra_blocksize	= AES_BLOCK_SIZE,
2006			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2007			.base.cra_module	= THIS_MODULE,
2008			.init			= sa_cipher_cra_init,
2009			.exit			= sa_cipher_cra_exit,
2010			.min_keysize		= AES_MIN_KEY_SIZE,
2011			.max_keysize		= AES_MAX_KEY_SIZE,
2012			.setkey			= sa_aes_ecb_setkey,
2013			.encrypt		= sa_encrypt,
2014			.decrypt		= sa_decrypt,
2015		}
2016	},
2017	[SA_ALG_CBC_DES3] = {
2018		.type = CRYPTO_ALG_TYPE_SKCIPHER,
2019		.alg.skcipher = {
2020			.base.cra_name		= "cbc(des3_ede)",
2021			.base.cra_driver_name	= "cbc-des3-sa2ul",
2022			.base.cra_priority	= 30000,
2023			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
2024						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2025						  CRYPTO_ALG_ASYNC |
2026						  CRYPTO_ALG_NEED_FALLBACK,
2027			.base.cra_blocksize	= DES_BLOCK_SIZE,
2028			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2029			.base.cra_module	= THIS_MODULE,
2030			.init			= sa_cipher_cra_init,
2031			.exit			= sa_cipher_cra_exit,
2032			.min_keysize		= 3 * DES_KEY_SIZE,
2033			.max_keysize		= 3 * DES_KEY_SIZE,
2034			.ivsize			= DES_BLOCK_SIZE,
2035			.setkey			= sa_3des_cbc_setkey,
2036			.encrypt		= sa_encrypt,
2037			.decrypt		= sa_decrypt,
2038		}
2039	},
2040	[SA_ALG_ECB_DES3] = {
2041		.type = CRYPTO_ALG_TYPE_SKCIPHER,
2042		.alg.skcipher = {
2043			.base.cra_name		= "ecb(des3_ede)",
2044			.base.cra_driver_name	= "ecb-des3-sa2ul",
2045			.base.cra_priority	= 30000,
2046			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
2047						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2048						  CRYPTO_ALG_ASYNC |
2049						  CRYPTO_ALG_NEED_FALLBACK,
2050			.base.cra_blocksize	= DES_BLOCK_SIZE,
2051			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2052			.base.cra_module	= THIS_MODULE,
2053			.init			= sa_cipher_cra_init,
2054			.exit			= sa_cipher_cra_exit,
2055			.min_keysize		= 3 * DES_KEY_SIZE,
2056			.max_keysize		= 3 * DES_KEY_SIZE,
2057			.setkey			= sa_3des_ecb_setkey,
2058			.encrypt		= sa_encrypt,
2059			.decrypt		= sa_decrypt,
2060		}
2061	},
2062	[SA_ALG_SHA1] = {
2063		.type = CRYPTO_ALG_TYPE_AHASH,
2064		.alg.ahash = {
2065			.halg.base = {
2066				.cra_name	= "sha1",
2067				.cra_driver_name	= "sha1-sa2ul",
2068				.cra_priority	= 400,
2069				.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
2070						  CRYPTO_ALG_ASYNC |
2071						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2072						  CRYPTO_ALG_NEED_FALLBACK,
2073				.cra_blocksize	= SHA1_BLOCK_SIZE,
2074				.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2075				.cra_module	= THIS_MODULE,
2076				.cra_init	= sa_sha1_cra_init,
2077				.cra_exit	= sa_sha_cra_exit,
2078			},
2079			.halg.digestsize	= SHA1_DIGEST_SIZE,
2080			.halg.statesize		= sizeof(struct sa_sha_req_ctx) +
2081						  sizeof(struct sha1_state),
2082			.init			= sa_sha_init,
2083			.update			= sa_sha_update,
2084			.final			= sa_sha_final,
2085			.finup			= sa_sha_finup,
2086			.digest			= sa_sha_digest,
2087			.export			= sa_sha_export,
2088			.import			= sa_sha_import,
2089		},
2090	},
2091	[SA_ALG_SHA256] = {
2092		.type = CRYPTO_ALG_TYPE_AHASH,
2093		.alg.ahash = {
2094			.halg.base = {
2095				.cra_name	= "sha256",
2096				.cra_driver_name	= "sha256-sa2ul",
2097				.cra_priority	= 400,
2098				.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
2099						  CRYPTO_ALG_ASYNC |
2100						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2101						  CRYPTO_ALG_NEED_FALLBACK,
2102				.cra_blocksize	= SHA256_BLOCK_SIZE,
2103				.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2104				.cra_module	= THIS_MODULE,
2105				.cra_init	= sa_sha256_cra_init,
2106				.cra_exit	= sa_sha_cra_exit,
2107			},
2108			.halg.digestsize	= SHA256_DIGEST_SIZE,
2109			.halg.statesize		= sizeof(struct sa_sha_req_ctx) +
2110						  sizeof(struct sha256_state),
2111			.init			= sa_sha_init,
2112			.update			= sa_sha_update,
2113			.final			= sa_sha_final,
2114			.finup			= sa_sha_finup,
2115			.digest			= sa_sha_digest,
2116			.export			= sa_sha_export,
2117			.import			= sa_sha_import,
2118		},
2119	},
2120	[SA_ALG_SHA512] = {
2121		.type = CRYPTO_ALG_TYPE_AHASH,
2122		.alg.ahash = {
2123			.halg.base = {
2124				.cra_name	= "sha512",
2125				.cra_driver_name	= "sha512-sa2ul",
2126				.cra_priority	= 400,
2127				.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
2128						  CRYPTO_ALG_ASYNC |
2129						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2130						  CRYPTO_ALG_NEED_FALLBACK,
2131				.cra_blocksize	= SHA512_BLOCK_SIZE,
2132				.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2133				.cra_module	= THIS_MODULE,
2134				.cra_init	= sa_sha512_cra_init,
2135				.cra_exit	= sa_sha_cra_exit,
2136			},
2137			.halg.digestsize	= SHA512_DIGEST_SIZE,
2138			.halg.statesize		= sizeof(struct sa_sha_req_ctx) +
2139						  sizeof(struct sha512_state),
2140			.init			= sa_sha_init,
2141			.update			= sa_sha_update,
2142			.final			= sa_sha_final,
2143			.finup			= sa_sha_finup,
2144			.digest			= sa_sha_digest,
2145			.export			= sa_sha_export,
2146			.import			= sa_sha_import,
2147		},
2148	},
2149	[SA_ALG_AUTHENC_SHA1_AES] = {
2150		.type	= CRYPTO_ALG_TYPE_AEAD,
2151		.alg.aead = {
2152			.base = {
2153				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2154				.cra_driver_name =
2155					"authenc(hmac(sha1),cbc(aes))-sa2ul",
2156				.cra_blocksize = AES_BLOCK_SIZE,
2157				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
2158					CRYPTO_ALG_KERN_DRIVER_ONLY |
2159					CRYPTO_ALG_ASYNC |
2160					CRYPTO_ALG_NEED_FALLBACK,
2161				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2162				.cra_module = THIS_MODULE,
2163				.cra_priority = 3000,
2164			},
2165			.ivsize = AES_BLOCK_SIZE,
2166			.maxauthsize = SHA1_DIGEST_SIZE,
2167
2168			.init = sa_cra_init_aead_sha1,
2169			.exit = sa_exit_tfm_aead,
2170			.setkey = sa_aead_cbc_sha1_setkey,
2171			.setauthsize = sa_aead_setauthsize,
2172			.encrypt = sa_aead_encrypt,
2173			.decrypt = sa_aead_decrypt,
2174		},
2175	},
2176	[SA_ALG_AUTHENC_SHA256_AES] = {
2177		.type	= CRYPTO_ALG_TYPE_AEAD,
2178		.alg.aead = {
2179			.base = {
2180				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2181				.cra_driver_name =
2182					"authenc(hmac(sha256),cbc(aes))-sa2ul",
2183				.cra_blocksize = AES_BLOCK_SIZE,
2184				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
2185					CRYPTO_ALG_KERN_DRIVER_ONLY |
2186					CRYPTO_ALG_ASYNC |
2187					CRYPTO_ALG_NEED_FALLBACK,
2188				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2189				.cra_module = THIS_MODULE,
2190				.cra_alignmask = 0,
2191				.cra_priority = 3000,
2192			},
2193			.ivsize = AES_BLOCK_SIZE,
2194			.maxauthsize = SHA256_DIGEST_SIZE,
2195
2196			.init = sa_cra_init_aead_sha256,
2197			.exit = sa_exit_tfm_aead,
2198			.setkey = sa_aead_cbc_sha256_setkey,
2199			.setauthsize = sa_aead_setauthsize,
2200			.encrypt = sa_aead_encrypt,
2201			.decrypt = sa_aead_decrypt,
2202		},
2203	},
2204};
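These are ordinary Crypto API algorithm definitions, so once registered they are selected by cra_name, with cra_priority breaking ties between implementations. A minimal, hypothetical kernel-side usage sketch (it assumes key, iv, a DMA-able scatterlist sg and len are prepared by the caller, and omits all error handling) that would resolve "cbc(aes)" to cbc-aes-sa2ul whenever this driver's priority wins:

	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);	/* highest-priority "cbc(aes)" */
	req = skcipher_request_alloc(tfm, GFP_KERNEL);

	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, sg, sg, len, iv);	/* in-place */
	crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);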
2205
2206/* Register the algorithms in crypto framework */
2207static void sa_register_algos(struct sa_crypto_data *dev_data)
2208{
2209	const struct sa_match_data *match_data = dev_data->match_data;
2210	struct device *dev = dev_data->dev;
2211	char *alg_name;
2212	u32 type;
2213	int i, err;
2214
2215	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2216		/* Skip unsupported algos */
2217		if (!(match_data->supported_algos & BIT(i)))
2218			continue;
2219
2220		type = sa_algs[i].type;
2221		if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2222			alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2223			err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2224		} else if (type == CRYPTO_ALG_TYPE_AHASH) {
2225			alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2226			err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2227		} else if (type == CRYPTO_ALG_TYPE_AEAD) {
2228			alg_name = sa_algs[i].alg.aead.base.cra_name;
2229			err = crypto_register_aead(&sa_algs[i].alg.aead);
2230		} else {
2231			dev_err(dev,
2232				"unsupported crypto algorithm (%d)",
2233				sa_algs[i].type);
2234			continue;
2235		}
2236
2237		if (err)
2238			dev_err(dev, "Failed to register '%s'\n", alg_name);
2239		else
2240			sa_algs[i].registered = true;
2241	}
2242}
2243
2244/* Unregister the algorithms in crypto framework */
2245static void sa_unregister_algos(const struct device *dev)
2246{
2247	u32 type;
2248	int i;
2249
2250	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2251		type = sa_algs[i].type;
2252		if (!sa_algs[i].registered)
2253			continue;
2254		if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2255			crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2256		else if (type == CRYPTO_ALG_TYPE_AHASH)
2257			crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2258		else if (type == CRYPTO_ALG_TYPE_AEAD)
2259			crypto_unregister_aead(&sa_algs[i].alg.aead);
2260
2261		sa_algs[i].registered = false;
2262	}
2263}
2264
2265static int sa_init_mem(struct sa_crypto_data *dev_data)
2266{
2267	struct device *dev = &dev_data->pdev->dev;
2268	/* Setup dma pool for security context buffers */
2269	dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2270					    SA_CTX_MAX_SZ, 64, 0);
2271	if (!dev_data->sc_pool) {
2272		dev_err(dev, "Failed to create dma pool");
2273		return -ENOMEM;
2274	}
2275
2276	return 0;
2277}
2278
2279static int sa_dma_init(struct sa_crypto_data *dd)
2280{
2281	int ret;
2282	struct dma_slave_config cfg;
2283
2284	dd->dma_rx1 = NULL;
2285	dd->dma_tx = NULL;
2286	dd->dma_rx2 = NULL;
2287
2288	ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2289	if (ret)
2290		return ret;
2291
2292	dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2293	if (IS_ERR(dd->dma_rx1))
2294		return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
2295				     "Unable to request rx1 DMA channel\n");
2296
2297	dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2298	if (IS_ERR(dd->dma_rx2)) {
2299		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
2300				    "Unable to request rx2 DMA channel\n");
2301		goto err_dma_rx2;
2302	}
2303
2304	dd->dma_tx = dma_request_chan(dd->dev, "tx");
2305	if (IS_ERR(dd->dma_tx)) {
2306		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
2307				    "Unable to request tx DMA channel\n");
2308		goto err_dma_tx;
2309	}
2310
2311	memzero_explicit(&cfg, sizeof(cfg));
2312
2313	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2314	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2315	cfg.src_maxburst = 4;
2316	cfg.dst_maxburst = 4;
2317
2318	ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
2319	if (ret) {
2320		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2321			ret);
2322		goto err_dma_config;
2323	}
2324
2325	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
2326	if (ret) {
2327		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2328			ret);
2329		goto err_dma_config;
2330	}
2331
2332	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
2333	if (ret) {
2334		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
2335			ret);
2336		goto err_dma_config;
2337	}
2338
2339	return 0;
2340
2341err_dma_config:
2342	dma_release_channel(dd->dma_tx);
2343err_dma_tx:
2344	dma_release_channel(dd->dma_rx2);
2345err_dma_rx2:
2346	dma_release_channel(dd->dma_rx1);
2347
2348	return ret;
2349}
2350
2351static int sa_link_child(struct device *dev, void *data)
2352{
2353	struct device *parent = data;
2354
2355	device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2356
2357	return 0;
2358}
2359
2360static struct sa_match_data am654_match_data = {
2361	.priv = 1,
2362	.priv_id = 1,
2363	.supported_algos = BIT(SA_ALG_CBC_AES) |
2364			   BIT(SA_ALG_EBC_AES) |
2365			   BIT(SA_ALG_CBC_DES3) |
2366			   BIT(SA_ALG_ECB_DES3) |
2367			   BIT(SA_ALG_SHA1) |
2368			   BIT(SA_ALG_SHA256) |
2369			   BIT(SA_ALG_SHA512) |
2370			   BIT(SA_ALG_AUTHENC_SHA1_AES) |
2371			   BIT(SA_ALG_AUTHENC_SHA256_AES),
2372};
2373
2374static struct sa_match_data am64_match_data = {
2375	.priv = 0,
2376	.priv_id = 0,
2377	.supported_algos = BIT(SA_ALG_CBC_AES) |
2378			   BIT(SA_ALG_EBC_AES) |
2379			   BIT(SA_ALG_SHA256) |
2380			   BIT(SA_ALG_SHA512) |
2381			   BIT(SA_ALG_AUTHENC_SHA256_AES),
2382};
2383
2384static const struct of_device_id of_match[] = {
2385	{ .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
2386	{ .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
2387	{ .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
2388	{ .compatible = "ti,am62-sa3ul", .data = &am64_match_data, },
2389	{},
2390};
2391MODULE_DEVICE_TABLE(of, of_match);
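The compatible strings above come from the board device tree; the "tx", "rx1" and "rx2" DMA names match the channels requested in sa_dma_init(). A rough, comment-style sketch of what a matching node could look like (the register address, size and UDMA thread IDs are placeholders; the authoritative layout is the ti,sa2ul device tree binding, not this file):

	/*
	 *	crypto@4e00000 {
	 *		compatible = "ti,am654-sa2ul";
	 *		reg = <0x0 0x4e00000 0x0 0x1200>;
	 *		dmas = <&main_udmap 0xc000>,
	 *		       <&main_udmap 0x4000>,
	 *		       <&main_udmap 0x4001>;
	 *		dma-names = "tx", "rx1", "rx2";
	 *	};
	 */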
2392
2393static int sa_ul_probe(struct platform_device *pdev)
2394{
2395	struct device *dev = &pdev->dev;
2396	struct device_node *node = dev->of_node;
2397	static void __iomem *saul_base;
2398	struct sa_crypto_data *dev_data;
2399	u32 status, val;
2400	int ret;
2401
2402	dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2403	if (!dev_data)
2404		return -ENOMEM;
2405
2406	dev_data->match_data = of_device_get_match_data(dev);
2407	if (!dev_data->match_data)
2408		return -ENODEV;
2409
2410	saul_base = devm_platform_ioremap_resource(pdev, 0);
2411	if (IS_ERR(saul_base))
2412		return PTR_ERR(saul_base);
2413
2414	sa_k3_dev = dev;
2415	dev_data->dev = dev;
2416	dev_data->pdev = pdev;
2417	dev_data->base = saul_base;
2418	platform_set_drvdata(pdev, dev_data);
2419	dev_set_drvdata(sa_k3_dev, dev_data);
2420
2421	pm_runtime_enable(dev);
2422	ret = pm_runtime_resume_and_get(dev);
2423	if (ret < 0) {
2424		dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
2425		pm_runtime_disable(dev);
2426		return ret;
2427	}
2428
2429	sa_init_mem(dev_data);
2430	ret = sa_dma_init(dev_data);
2431	if (ret)
2432		goto destroy_dma_pool;
2433
2434	spin_lock_init(&dev_data->scid_lock);
2435
2436	val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2437	      SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2438	      SA_EEC_TRNG_EN;
2439	status = readl_relaxed(saul_base + SA_ENGINE_STATUS);
2440	/* Only write the enable register if some engines are not already enabled */
2441	if (val & ~status)
2442		writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2443
2444	sa_register_algos(dev_data);
2445
2446	ret = of_platform_populate(node, NULL, NULL, dev);
2447	if (ret)
2448		goto release_dma;
2449
2450	device_for_each_child(dev, dev, sa_link_child);
2451
2452	return 0;
2453
2454release_dma:
2455	sa_unregister_algos(dev);
2456
2457	dma_release_channel(dev_data->dma_rx2);
2458	dma_release_channel(dev_data->dma_rx1);
2459	dma_release_channel(dev_data->dma_tx);
2460
2461destroy_dma_pool:
2462	dma_pool_destroy(dev_data->sc_pool);
2463
2464	pm_runtime_put_sync(dev);
2465	pm_runtime_disable(dev);
2466
2467	return ret;
2468}
2469
2470static void sa_ul_remove(struct platform_device *pdev)
2471{
2472	struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
2473
2474	of_platform_depopulate(&pdev->dev);
2475
2476	sa_unregister_algos(&pdev->dev);
2477
2478	dma_release_channel(dev_data->dma_rx2);
2479	dma_release_channel(dev_data->dma_rx1);
2480	dma_release_channel(dev_data->dma_tx);
2481
2482	dma_pool_destroy(dev_data->sc_pool);
2483
2484	platform_set_drvdata(pdev, NULL);
2485
2486	pm_runtime_put_sync(&pdev->dev);
2487	pm_runtime_disable(&pdev->dev);
2488}
2489
2490static struct platform_driver sa_ul_driver = {
2491	.probe = sa_ul_probe,
2492	.remove_new = sa_ul_remove,
2493	.driver = {
2494		   .name = "saul-crypto",
2495		   .of_match_table = of_match,
2496		   },
2497};
2498module_platform_driver(sa_ul_driver);
2499MODULE_LICENSE("GPL v2");
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * K3 SA2UL crypto accelerator driver
   4 *
   5 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
   6 *
   7 * Authors:	Keerthy
   8 *		Vitaly Andrianov
   9 *		Tero Kristo
  10 */
  11#include <linux/clk.h>
  12#include <linux/dmaengine.h>
  13#include <linux/dmapool.h>
  14#include <linux/module.h>
  15#include <linux/of_device.h>
  16#include <linux/platform_device.h>
  17#include <linux/pm_runtime.h>
  18
  19#include <crypto/aes.h>
  20#include <crypto/authenc.h>
  21#include <crypto/des.h>
  22#include <crypto/internal/aead.h>
  23#include <crypto/internal/hash.h>
  24#include <crypto/internal/skcipher.h>
  25#include <crypto/scatterwalk.h>
  26#include <crypto/sha.h>
  27
  28#include "sa2ul.h"
  29
  30/* Byte offset for key in encryption security context */
  31#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
  32/* Byte offset for Aux-1 in encryption security context */
  33#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
  34
  35#define SA_CMDL_UPD_ENC         0x0001
  36#define SA_CMDL_UPD_AUTH        0x0002
  37#define SA_CMDL_UPD_ENC_IV      0x0004
  38#define SA_CMDL_UPD_AUTH_IV     0x0008
  39#define SA_CMDL_UPD_AUX_KEY     0x0010
  40
  41#define SA_AUTH_SUBKEY_LEN	16
  42#define SA_CMDL_PAYLOAD_LENGTH_MASK	0xFFFF
  43#define SA_CMDL_SOP_BYPASS_LEN_MASK	0xFF000000
  44
  45#define MODE_CONTROL_BYTES	27
  46#define SA_HASH_PROCESSING	0
  47#define SA_CRYPTO_PROCESSING	0
  48#define SA_UPLOAD_HASH_TO_TLR	BIT(6)
  49
  50#define SA_SW0_FLAGS_MASK	0xF0000
  51#define SA_SW0_CMDL_INFO_MASK	0x1F00000
  52#define SA_SW0_CMDL_PRESENT	BIT(4)
  53#define SA_SW0_ENG_ID_MASK	0x3E000000
  54#define SA_SW0_DEST_INFO_PRESENT	BIT(30)
  55#define SA_SW2_EGRESS_LENGTH		0xFF000000
  56#define SA_BASIC_HASH		0x10
  57
  58#define SHA256_DIGEST_WORDS    8
  59/* Make 32-bit word from 4 bytes */
  60#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
  61				   ((b2) << 8) | (b3))
  62
  63/* size of SCCTL structure in bytes */
  64#define SA_SCCTL_SZ 16
  65
  66/* Max Authentication tag size */
  67#define SA_MAX_AUTH_TAG_SZ 64
  68
  69#define PRIV_ID	0x1
  70#define PRIV	0x1
  71
  72static struct device *sa_k3_dev;
  73
  74/**
  75 * struct sa_cmdl_cfg - Command label configuration descriptor
  76 * @aalg: authentication algorithm ID
  77 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
  78 * @auth_eng_id: Authentication Engine ID
  79 * @iv_size: Initialization Vector size
  80 * @akey: Authentication key
  81 * @akey_len: Authentication key length
  82 * @enc: True, if this is an encode request
  83 */
  84struct sa_cmdl_cfg {
  85	int aalg;
  86	u8 enc_eng_id;
  87	u8 auth_eng_id;
  88	u8 iv_size;
  89	const u8 *akey;
  90	u16 akey_len;
  91	bool enc;
  92};
  93
  94/**
  95 * struct algo_data - Crypto algorithm specific data
  96 * @enc_eng: Encryption engine info structure
  97 * @auth_eng: Authentication engine info structure
  98 * @auth_ctrl: Authentication control word
  99 * @hash_size: Size of digest
 100 * @iv_idx: iv index in psdata
 101 * @iv_out_size: iv out size
 102 * @ealg_id: Encryption Algorithm ID
 103 * @aalg_id: Authentication algorithm ID
 104 * @mci_enc: Mode Control Instruction for Encryption algorithm
 105 * @mci_dec: Mode Control Instruction for Decryption
 106 * @inv_key: Whether the encryption algorithm demands key inversion
 107 * @ctx: Pointer to the algorithm context
 108 * @keyed_mac: Whether the authentication algorithm has key
 109 * @prep_iopad: Function pointer to generate intermediate ipad/opad
 110 */
 111struct algo_data {
 112	struct sa_eng_info enc_eng;
 113	struct sa_eng_info auth_eng;
 114	u8 auth_ctrl;
 115	u8 hash_size;
 116	u8 iv_idx;
 117	u8 iv_out_size;
 118	u8 ealg_id;
 119	u8 aalg_id;
 120	u8 *mci_enc;
 121	u8 *mci_dec;
 122	bool inv_key;
 123	struct sa_tfm_ctx *ctx;
 124	bool keyed_mac;
 125	void (*prep_iopad)(struct algo_data *algo, const u8 *key,
 126			   u16 key_sz, __be32 *ipad, __be32 *opad);
 127};
 128
 129/**
 130 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
 131 * @type: Type of the crypto algorithm.
 132 * @alg: Union of crypto algorithm definitions.
 133 * @registered: Flag indicating if the crypto algorithm is already registered
 134 */
 135struct sa_alg_tmpl {
 136	u32 type;		/* CRYPTO_ALG_TYPE from <linux/crypto.h> */
 137	union {
 138		struct skcipher_alg skcipher;
 139		struct ahash_alg ahash;
 140		struct aead_alg aead;
 141	} alg;
 142	bool registered;
 143};
 144
 145/**
 146 * struct sa_rx_data: RX packet miscellaneous data placeholder
 147 * @req: crypto request data pointer
 148 * @ddev: pointer to the DMA device
 149 * @tx_in: dma_async_tx_descriptor pointer for rx channel
 150 * @split_src_sg: Set if the src sg is split and needs to be freed up
 151 * @split_dst_sg: Set if the dst sg is split and needs to be freed up
 152 * @enc: Flag indicating either encryption or decryption
 153 * @enc_iv_size: Initialisation vector size
 154 * @iv_idx: Initialisation vector index
 155 * @rx_sg: Static scatterlist entry for overriding RX data
 156 * @tx_sg: Static scatterlist entry for overriding TX data
 157 * @src: Source data pointer
 158 * @dst: Destination data pointer
 159 */
 160struct sa_rx_data {
 161	void *req;
 162	struct device *ddev;
 163	struct dma_async_tx_descriptor *tx_in;
 164	struct scatterlist *split_src_sg;
 165	struct scatterlist *split_dst_sg;
 166	u8 enc;
 167	u8 enc_iv_size;
 168	u8 iv_idx;
 169	struct scatterlist rx_sg;
 170	struct scatterlist tx_sg;
 171	struct scatterlist *src;
 172	struct scatterlist *dst;
 173};
 174
 175/**
 176 * struct sa_req: SA request definition
 177 * @dev: device for the request
 178 * @size: total data to be transmitted via DMA
 179 * @enc_offset: offset of cipher data
 180 * @enc_size: data to be passed to cipher engine
 181 * @enc_iv: cipher IV
 182 * @auth_offset: offset of the authentication data
 183 * @auth_size: size of the authentication data
 184 * @auth_iv: authentication IV
 185 * @type: algorithm type for the request
 186 * @cmdl: command label pointer
 187 * @base: pointer to the base request
 188 * @ctx: pointer to the algorithm context data
 189 * @enc: true if this is an encode request
 190 * @src: source data
 191 * @dst: destination data
 192 * @callback: DMA callback for the request
 193 * @mdata_size: metadata size passed to DMA
 194 */
 195struct sa_req {
 196	struct device *dev;
 197	u16 size;
 198	u8 enc_offset;
 199	u16 enc_size;
 200	u8 *enc_iv;
 201	u8 auth_offset;
 202	u16 auth_size;
 203	u8 *auth_iv;
 204	u32 type;
 205	u32 *cmdl;
 206	struct crypto_async_request *base;
 207	struct sa_tfm_ctx *ctx;
 208	bool enc;
 209	struct scatterlist *src;
 210	struct scatterlist *dst;
 211	dma_async_tx_callback callback;
 212	u16 mdata_size;
 213};
 214
 215/*
 216 * Mode Control Instructions for various Key lengths 128, 192, 256
 217 * For CBC (Cipher Block Chaining) mode for encryption
 218 */
 219static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
 220	{	0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
 221		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 222		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 223	{	0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
 224		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 225		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 226	{	0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
 227		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 228		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 229};
 230
 231/*
 232 * Mode Control Instructions for various Key lengths 128, 192, 256
 233 * For CBC (Cipher Block Chaining) mode for decryption
 234 */
 235static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
 236	{	0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 237		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 238		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 239	{	0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 240		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 241		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 242	{	0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 243		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 244		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 245};
 246
 247/*
 248 * Mode Control Instructions for various Key lengths 128, 192, 256
 249 * For CBC (Cipher Block Chaining) mode for encryption
 250 */
 251static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
 252	{	0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
 253		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 254		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 255	{	0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
 256		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 257		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 258	{	0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
 259		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 260		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 261};
 262
 263/*
 264 * Mode Control Instructions for various Key lengths 128, 192, 256
 265 * For CBC (Cipher Block Chaining) mode for decryption
 266 */
 267static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
 268	{	0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 269		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 270		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 271	{	0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 272		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 273		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 274	{	0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
 275		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 276		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 277};
 278
 279/*
 280 * Mode Control Instructions for various Key lengths 128, 192, 256
 281 * For ECB (Electronic Code Book) mode for encryption
 282 */
 283static u8 mci_ecb_enc_array[3][27] = {
 284	{	0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 285		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 286		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 287	{	0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 288		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 289		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 290	{	0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 291		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 292		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 293};
 294
 295/*
 296 * Mode Control Instructions for various Key lengths 128, 192, 256
 297 * For ECB (Electronic Code Book) mode for decryption
 298 */
 299static u8 mci_ecb_dec_array[3][27] = {
 300	{	0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 301		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 302		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 303	{	0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 304		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 305		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 306	{	0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
 307		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 308		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00	},
 309};
 310
 311/*
 312 * Mode Control Instructions for DES algorithm
 313 * For CBC (Cipher Block Chaining) mode and ECB mode
 314 * encryption and for decryption respectively
 315 */
 316static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
 317	0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
 318	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 319	0x00, 0x00, 0x00,
 320};
 321
 322static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
 323	0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
 324	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 325	0x00, 0x00, 0x00,
 326};
 327
 328static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
 329	0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
 330	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 331	0x00, 0x00, 0x00,
 332};
 333
 334static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
 335	0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
 336	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 337	0x00, 0x00, 0x00,
 338};
 339
 340/*
 341 * Perform 16-byte (128-bit) swizzling.
 342 * The SA2UL expects the security context to
 343 * be little endian, and the bus width is 128 bits (16 bytes).
 344 * Hence swap 16 bytes at a time, from higher to lower address.
 345 */
 346static void sa_swiz_128(u8 *in, u16 len)
 347{
 348	u8 data[16];
 349	int i, j;
 350
 351	for (i = 0; i < len; i += 16) {
 352		memcpy(data, &in[i], 16);
 353		for (j = 0; j < 16; j++)
 354			in[i + j] = data[15 - j];
 355	}
 356}
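In other words, each aligned 16-byte group is reversed in place, which also makes the transformation its own inverse. A small illustrative check (not part of the driver):

	u8 buf[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		       0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };

	sa_swiz_128(buf, sizeof(buf));	/* buf is now 0x0f, 0x0e, ..., 0x00 */
	sa_swiz_128(buf, sizeof(buf));	/* reversing again restores the original */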
 357
 358/* Prepare the ipad and opad from the key, per step 1 of the HMAC construction */
 359static void prepare_kiopad(u8 *k_ipad, u8 *k_opad, const u8 *key, u16 key_sz)
 360{
 361	int i;
 362
 363	for (i = 0; i < key_sz; i++) {
 364		k_ipad[i] = key[i] ^ 0x36;
 365		k_opad[i] = key[i] ^ 0x5c;
 366	}
 367
 368	/* Instead of XOR with 0 */
 369	for (; i < SHA1_BLOCK_SIZE; i++) {
 370		k_ipad[i] = 0x36;
 371		k_opad[i] = 0x5c;
 372	}
 373}
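This is the key-padding step of the HMAC construction, HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)); sa_prepare_iopads() below hashes one block of each pad and exports the intermediate digest state so the hardware can finish the inner and outer hashes. The pad buffers are sized SHA1_BLOCK_SIZE (64 bytes), which happens to equal SHA-256's block size as well, so the same helper serves both HMAC-SHA1 and HMAC-SHA256 as supported here; it would not be sufficient for SHA-512's 128-byte blocks.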
 374
 375static void sa_export_shash(struct shash_desc *hash, int block_size,
 376			    int digest_size, __be32 *out)
 377{
 378	union {
 379		struct sha1_state sha1;
 380		struct sha256_state sha256;
 381		struct sha512_state sha512;
 382	} sha;
 383	void *state;
 384	u32 *result;
 385	int i;
 386
 387	switch (digest_size) {
 388	case SHA1_DIGEST_SIZE:
 389		state = &sha.sha1;
 390		result = sha.sha1.state;
 391		break;
 392	case SHA256_DIGEST_SIZE:
 393		state = &sha.sha256;
 394		result = sha.sha256.state;
 395		break;
 396	default:
 397		dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
 398			digest_size);
 399		return;
 400	}
 401
 402	crypto_shash_export(hash, state);
 403
 404	for (i = 0; i < digest_size >> 2; i++)
 405		out[i] = cpu_to_be32(result[i]);
 406}
 407
 408static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
 409			      u16 key_sz, __be32 *ipad, __be32 *opad)
 410{
 411	SHASH_DESC_ON_STACK(shash, data->ctx->shash);
 412	int block_size = crypto_shash_blocksize(data->ctx->shash);
 413	int digest_size = crypto_shash_digestsize(data->ctx->shash);
 414	u8 k_ipad[SHA1_BLOCK_SIZE];
 415	u8 k_opad[SHA1_BLOCK_SIZE];
 416
 417	shash->tfm = data->ctx->shash;
 418
 419	prepare_kiopad(k_ipad, k_opad, key, key_sz);
 420
 421	memzero_explicit(ipad, block_size);
 422	memzero_explicit(opad, block_size);
 423
 424	crypto_shash_init(shash);
 425	crypto_shash_update(shash, k_ipad, block_size);
 426	sa_export_shash(shash, block_size, digest_size, ipad);
 427
 428	crypto_shash_init(shash);
 429	crypto_shash_update(shash, k_opad, block_size);
 430
 431	sa_export_shash(shash, block_size, digest_size, opad);
 432}
 433
 434/* Derive the inverse key used in AES-CBC decryption operation */
 435static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
 436{
 437	struct crypto_aes_ctx ctx;
 438	int key_pos;
 439
 440	if (aes_expandkey(&ctx, key, key_sz)) {
 441		dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
 442		return -EINVAL;
 443	}
 444
 445	/* Workaround to get the right inverse for AES_KEYSIZE_192 keys */
 446	if (key_sz == AES_KEYSIZE_192) {
 447		ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
 448		ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
 449	}
 450
 451	/* Based on the crypto_aes_expand_key logic */
 452	switch (key_sz) {
 453	case AES_KEYSIZE_128:
 454	case AES_KEYSIZE_192:
 455		key_pos = key_sz + 24;
 456		break;
 457
 458	case AES_KEYSIZE_256:
 459		key_pos = key_sz + 24 - 4;
 460		break;
 461
 462	default:
 463		dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
 464		return -EINVAL;
 465	}
 466
 467	memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
 468	return 0;
 469}
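The "inverse key" handed to the hardware is simply the tail of the expanded key schedule, presumably the material the engine's equivalent-inverse-cipher decryption path starts from; note that key_pos indexes 32-bit words of ctx.key_enc. A worked example for the AES-128 case (the other sizes follow the same pattern, with the AES_KEYSIZE_192 fix-up above supplying the two schedule words past the normal 52-word expansion):

	/*
	 * AES-128: aes_expandkey() fills key_enc[0..43] (44 words).
	 * key_pos = 16 + 24 = 40, so the 16 bytes copied out are words
	 * 40..43 of the schedule, i.e. the final round key, which is the
	 * starting point for decryption.
	 */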
 470
 471/* Set Security context for the encryption engine */
 472static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
 473			 u8 enc, u8 *sc_buf)
 474{
 475	const u8 *mci = NULL;
 476
 477	/* Set Encryption mode selector to crypto processing */
 478	sc_buf[0] = SA_CRYPTO_PROCESSING;
 479
 480	if (enc)
 481		mci = ad->mci_enc;
 482	else
 483		mci = ad->mci_dec;
 484	/* Set the mode control instructions in security context */
 485	if (mci)
 486		memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
 487
 488	/* For AES-CBC decryption get the inverse key */
 489	if (ad->inv_key && !enc) {
 490		if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
 491			return -EINVAL;
 492	/* For all other cases: key is used */
 493	} else {
 494		memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
 495	}
 496
 497	return 0;
 498}
 499
 500/* Set Security context for the authentication engine */
 501static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
 502			   u8 *sc_buf)
 503{
 504	__be32 ipad[64], opad[64];
 505
 506	/* Set Authentication mode selector to hash processing */
 507	sc_buf[0] = SA_HASH_PROCESSING;
 508	/* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
 509	sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
 510	sc_buf[1] |= ad->auth_ctrl;
 511
 512	/* Copy the keys or ipad/opad */
 513	if (ad->keyed_mac) {
 514		ad->prep_iopad(ad, key, key_sz, ipad, opad);
 515
 516		/* Copy ipad to AuthKey */
 517		memcpy(&sc_buf[32], ipad, ad->hash_size);
 518		/* Copy opad to Aux-1 */
 519		memcpy(&sc_buf[64], opad, ad->hash_size);
 520	} else {
 521		/* basic hash */
 522		sc_buf[1] |= SA_BASIC_HASH;
 523	}
 524}
 525
 526static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
 527{
 528	int j;
 529
 530	for (j = 0; j < ((size16) ? 4 : 2); j++) {
 531		*out = cpu_to_be32(*((u32 *)iv));
 532		iv += 4;
 533		out++;
 534	}
 535}
 536
 537/* Format general command label */
 538static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
 539			      struct sa_cmdl_upd_info *upd_info)
 540{
 541	u8 enc_offset = 0, auth_offset = 0, total = 0;
 542	u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
 543	u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
 544	u32 *word_ptr = (u32 *)cmdl;
 545	int i;
 546
 547	/* Clear the command label */
 548	memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
 549
 550	/* Initialize the command update structure */
 551	memzero_explicit(upd_info, sizeof(*upd_info));
 552
 553	if (cfg->enc_eng_id && cfg->auth_eng_id) {
 554		if (cfg->enc) {
 555			auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
 556			enc_next_eng = cfg->auth_eng_id;
 557
 558			if (cfg->iv_size)
 559				auth_offset += cfg->iv_size;
 560		} else {
 561			enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
 562			auth_next_eng = cfg->enc_eng_id;
 563		}
 564	}
 565
 566	if (cfg->enc_eng_id) {
 567		upd_info->flags |= SA_CMDL_UPD_ENC;
 568		upd_info->enc_size.index = enc_offset >> 2;
 569		upd_info->enc_offset.index = upd_info->enc_size.index + 1;
 570		/* Encryption command label */
 571		cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
 572
 573		/* Encryption modes requiring IV */
 574		if (cfg->iv_size) {
 575			upd_info->flags |= SA_CMDL_UPD_ENC_IV;
 576			upd_info->enc_iv.index =
 577				(enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
 578			upd_info->enc_iv.size = cfg->iv_size;
 579
 580			cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
 581				SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
 582
 583			cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
 584				(SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
 585			total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
 586		} else {
 587			cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
 588						SA_CMDL_HEADER_SIZE_BYTES;
 589			total += SA_CMDL_HEADER_SIZE_BYTES;
 590		}
 591	}
 592
 593	if (cfg->auth_eng_id) {
 594		upd_info->flags |= SA_CMDL_UPD_AUTH;
 595		upd_info->auth_size.index = auth_offset >> 2;
 596		upd_info->auth_offset.index = upd_info->auth_size.index + 1;
 597		cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
 598		cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
 599			SA_CMDL_HEADER_SIZE_BYTES;
 600		total += SA_CMDL_HEADER_SIZE_BYTES;
 601	}
 602
 603	total = roundup(total, 8);
 604
 605	for (i = 0; i < total / 4; i++)
 606		word_ptr[i] = swab32(word_ptr[i]);
 607
 608	return total;
 609}
 610
 611/* Update Command label */
 612static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
 613				  struct sa_cmdl_upd_info *upd_info)
 614{
 615	int i = 0, j;
 616
 617	if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
 618		cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
 619		cmdl[upd_info->enc_size.index] |= req->enc_size;
 620		cmdl[upd_info->enc_offset.index] &=
 621						~SA_CMDL_SOP_BYPASS_LEN_MASK;
 622		cmdl[upd_info->enc_offset.index] |=
 623			((u32)req->enc_offset <<
 624			 __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
 625
 626		if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
 627			__be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
 628			u32 *enc_iv = (u32 *)req->enc_iv;
 629
 630			for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
 631				data[j] = cpu_to_be32(*enc_iv);
 632				enc_iv++;
 633			}
 634		}
 635	}
 636
 637	if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
 638		cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
 639		cmdl[upd_info->auth_size.index] |= req->auth_size;
 640		cmdl[upd_info->auth_offset.index] &=
 641			~SA_CMDL_SOP_BYPASS_LEN_MASK;
 642		cmdl[upd_info->auth_offset.index] |=
 643			((u32)req->auth_offset <<
 644			 __ffs(SA_CMDL_SOP_BYPASS_LEN_MASK));
 645		if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
 646			sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
 647				   req->auth_iv,
 648				   (upd_info->auth_iv.size > 8));
 649		}
 650		if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
 651			int offset = (req->auth_size & 0xF) ? 4 : 0;
 652
 653			memcpy(&cmdl[upd_info->aux_key_info.index],
 654			       &upd_info->aux_key[offset], 16);
 655		}
 656	}
 657}
 658
 659/* Format SWINFO words to be sent to SA */
 660static
 661void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
 662		   u8 cmdl_present, u8 cmdl_offset, u8 flags,
 663		   u8 hash_size, u32 *swinfo)
 664{
 665	swinfo[0] = sc_id;
 666	swinfo[0] |= (flags << __ffs(SA_SW0_FLAGS_MASK));
 667	if (likely(cmdl_present))
 668		swinfo[0] |= ((cmdl_offset | SA_SW0_CMDL_PRESENT) <<
 669						__ffs(SA_SW0_CMDL_INFO_MASK));
 670	swinfo[0] |= (eng_id << __ffs(SA_SW0_ENG_ID_MASK));
 671
 672	swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
 673	swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
 674	swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
 675	swinfo[2] |= (hash_size << __ffs(SA_SW2_EGRESS_LENGTH));
 676}
 677
 678/* Dump the security context */
 679static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
 680{
 681#ifdef DEBUG
 682	dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
 683	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
 684		       16, 1, buf, SA_CTX_MAX_SZ, false);
 685#endif
 686}
 687
 688static
 689int sa_init_sc(struct sa_ctx_info *ctx, const u8 *enc_key,
 690	       u16 enc_key_sz, const u8 *auth_key, u16 auth_key_sz,
 691	       struct algo_data *ad, u8 enc, u32 *swinfo)
 692{
 693	int enc_sc_offset = 0;
 694	int auth_sc_offset = 0;
 695	u8 *sc_buf = ctx->sc;
 696	u16 sc_id = ctx->sc_id;
 697	u8 first_engine = 0;
 698
 699	memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
 700
 701	if (ad->auth_eng.eng_id) {
 702		if (enc)
 703			first_engine = ad->enc_eng.eng_id;
 704		else
 705			first_engine = ad->auth_eng.eng_id;
 706
 707		enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
 708		auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
 709		sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
 710		if (!ad->hash_size)
 711			return -EINVAL;
 712		ad->hash_size = roundup(ad->hash_size, 8);
 713
 714	} else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
 715		enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
 716		first_engine = ad->enc_eng.eng_id;
 717		sc_buf[1] = SA_SCCTL_FE_ENC;
 718		ad->hash_size = ad->iv_out_size;
 719	}
 720
 721	/* SCCTL Owner info: 0=host, 1=CP_ACE */
 722	sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
 723	memcpy(&sc_buf[2], &sc_id, 2);
 724	sc_buf[4] = 0x0;
 725	sc_buf[5] = PRIV_ID;
 726	sc_buf[6] = PRIV;
 727	sc_buf[7] = 0x0;
 728
 729	/* Prepare context for encryption engine */
 730	if (ad->enc_eng.sc_size) {
 731		if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
 732				  &sc_buf[enc_sc_offset]))
 733			return -EINVAL;
 734	}
 735
 736	/* Prepare context for authentication engine */
 737	if (ad->auth_eng.sc_size)
 738		sa_set_sc_auth(ad, auth_key, auth_key_sz,
 739			       &sc_buf[auth_sc_offset]);
 740
 741	/* Set the ownership of context to CP_ACE */
 742	sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
 743
 744	/* swizzle the security context */
 745	sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
 746
 747	sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
 748		      SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
 749
 750	sa_dump_sc(sc_buf, ctx->sc_phys);
 751
 752	return 0;
 753}
 754
 755/* Free the per direction context memory */
 756static void sa_free_ctx_info(struct sa_ctx_info *ctx,
 757			     struct sa_crypto_data *data)
 758{
 759	unsigned long bn;
 760
 761	bn = ctx->sc_id - data->sc_id_start;
 762	spin_lock(&data->scid_lock);
 763	__clear_bit(bn, data->ctx_bm);
 764	data->sc_id--;
 765	spin_unlock(&data->scid_lock);
 766
 767	if (ctx->sc) {
 768		dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
 769		ctx->sc = NULL;
 770	}
 771}
 772
 773static int sa_init_ctx_info(struct sa_ctx_info *ctx,
 774			    struct sa_crypto_data *data)
 775{
 776	unsigned long bn;
 777	int err;
 778
 779	spin_lock(&data->scid_lock);
 780	bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
 781	__set_bit(bn, data->ctx_bm);
 782	data->sc_id++;
 783	spin_unlock(&data->scid_lock);
 784
 785	ctx->sc_id = (u16)(data->sc_id_start + bn);
 786
 787	ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
 788	if (!ctx->sc) {
 789		dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
 790		err = -ENOMEM;
 791		goto scid_rollback;
 792	}
 793
 794	return 0;
 795
 796scid_rollback:
 797	spin_lock(&data->scid_lock);
 798	__clear_bit(bn, data->ctx_bm);
 799	data->sc_id--;
 800	spin_unlock(&data->scid_lock);
 801
 802	return err;
 803}
 804
 805static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
 806{
 807	struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 808	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
 809
 810	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
 811		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
 812		ctx->dec.sc_id, &ctx->dec.sc_phys);
 813
 814	sa_free_ctx_info(&ctx->enc, data);
 815	sa_free_ctx_info(&ctx->dec, data);
 816
 817	crypto_free_sync_skcipher(ctx->fallback.skcipher);
 818}
 819
 820static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
 821{
 822	struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 823	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
 824	const char *name = crypto_tfm_alg_name(&tfm->base);
 825	int ret;
 826
 827	memzero_explicit(ctx, sizeof(*ctx));
 828	ctx->dev_data = data;
 829
 830	ret = sa_init_ctx_info(&ctx->enc, data);
 831	if (ret)
 832		return ret;
 833	ret = sa_init_ctx_info(&ctx->dec, data);
 834	if (ret) {
 835		sa_free_ctx_info(&ctx->enc, data);
 836		return ret;
 837	}
 838
 839	ctx->fallback.skcipher =
 840		crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 841
 842	if (IS_ERR(ctx->fallback.skcipher)) {
 843		dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
 844		return PTR_ERR(ctx->fallback.skcipher);
 845	}
 846	}
 847	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
 848		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
 849		ctx->dec.sc_id, &ctx->dec.sc_phys);
 850	return 0;
 851}
 852
 853static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 854			    unsigned int keylen, struct algo_data *ad)
 855{
 856	struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 857	int cmdl_len;
 858	struct sa_cmdl_cfg cfg;
 859	int ret;
 860
 861	if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
 862	    keylen != AES_KEYSIZE_256)
 863		return -EINVAL;
 864
 865	ad->enc_eng.eng_id = SA_ENG_ID_EM1;
 866	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
 867
 868	memzero_explicit(&cfg, sizeof(cfg));
 869	cfg.enc_eng_id = ad->enc_eng.eng_id;
 870	cfg.iv_size = crypto_skcipher_ivsize(tfm);
 871
 872	crypto_sync_skcipher_clear_flags(ctx->fallback.skcipher,
 873					 CRYPTO_TFM_REQ_MASK);
 874	crypto_sync_skcipher_set_flags(ctx->fallback.skcipher,
 875				       tfm->base.crt_flags &
 876				       CRYPTO_TFM_REQ_MASK);
 877	ret = crypto_sync_skcipher_setkey(ctx->fallback.skcipher, key, keylen);
 878	if (ret)
 879		return ret;
 880
 881	/* Setup Encryption Security Context & Command label template */
 882	if (sa_init_sc(&ctx->enc, key, keylen, NULL, 0, ad, 1,
 883		       &ctx->enc.epib[1]))
 884		goto badkey;
 885
 886	cmdl_len = sa_format_cmdl_gen(&cfg,
 887				      (u8 *)ctx->enc.cmdl,
 888				      &ctx->enc.cmdl_upd_info);
 889	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
 890		goto badkey;
 891
 892	ctx->enc.cmdl_size = cmdl_len;
 893
 894	/* Setup Decryption Security Context & Command label template */
 895	if (sa_init_sc(&ctx->dec, key, keylen, NULL, 0, ad, 0,
 896		       &ctx->dec.epib[1]))
 897		goto badkey;
 898
 899	cfg.enc_eng_id = ad->enc_eng.eng_id;
 900	cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
 901				      &ctx->dec.cmdl_upd_info);
 902
 903	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
 904		goto badkey;
 905
 906	ctx->dec.cmdl_size = cmdl_len;
 907	ctx->iv_idx = ad->iv_idx;
 908
 909	return 0;
 910
 911badkey:
 912	dev_err(sa_k3_dev, "%s: badkey\n", __func__);
 913	return -EINVAL;
 914}
 915
 916static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
 917			     unsigned int keylen)
 918{
 919	struct algo_data ad = { 0 };
 920	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
 921	int key_idx = (keylen >> 3) - 2;
 922
 923	if (key_idx >= 3)
 924		return -EINVAL;
 925
 926	ad.mci_enc = mci_cbc_enc_array[key_idx];
 927	ad.mci_dec = mci_cbc_dec_array[key_idx];
 928	ad.inv_key = true;
 929	ad.ealg_id = SA_EALG_ID_AES_CBC;
 930	ad.iv_idx = 4;
 931	ad.iv_out_size = 16;
 932
 933	return sa_cipher_setkey(tfm, key, keylen, &ad);
 934}
 935
 936static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
 937			     unsigned int keylen)
 938{
 939	struct algo_data ad = { 0 };
 940	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
 941	int key_idx = (keylen >> 3) - 2;
 942
 943	if (key_idx >= 3)
 944		return -EINVAL;
 945
 946	ad.mci_enc = mci_ecb_enc_array[key_idx];
 947	ad.mci_dec = mci_ecb_dec_array[key_idx];
 948	ad.inv_key = true;
 949	ad.ealg_id = SA_EALG_ID_AES_ECB;
 950
 951	return sa_cipher_setkey(tfm, key, keylen, &ad);
 952}
 953
 954static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
 955			      unsigned int keylen)
 956{
 957	struct algo_data ad = { 0 };
 958
 959	ad.mci_enc = mci_cbc_3des_enc_array;
 960	ad.mci_dec = mci_cbc_3des_dec_array;
 961	ad.ealg_id = SA_EALG_ID_3DES_CBC;
 962	ad.iv_idx = 6;
 963	ad.iv_out_size = 8;
 964
 965	return sa_cipher_setkey(tfm, key, keylen, &ad);
 966}
 967
 968static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
 969			      unsigned int keylen)
 970{
 971	struct algo_data ad = { 0 };
 972
 973	ad.mci_enc = mci_ecb_3des_enc_array;
 974	ad.mci_dec = mci_ecb_3des_dec_array;
 975
 976	return sa_cipher_setkey(tfm, key, keylen, &ad);
 977}
 978
 979static void sa_aes_dma_in_callback(void *data)
 980{
 981	struct sa_rx_data *rxd = (struct sa_rx_data *)data;
 982	struct skcipher_request *req;
 983	int sglen;
 984	u32 *result;
 985	__be32 *mdptr;
 986	size_t ml, pl;
 987	int i;
 988	enum dma_data_direction dir_src;
 989	bool diff_dst;
 990
 
 991	req = container_of(rxd->req, struct skcipher_request, base);
 992	sglen = sg_nents_for_len(req->src, req->cryptlen);
 993
 994	diff_dst = (req->src != req->dst) ? true : false;
 995	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
 996
 997	if (req->iv) {
 998		mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
 999							       &ml);
1000		result = (u32 *)req->iv;
1001
1002		for (i = 0; i < (rxd->enc_iv_size / 4); i++)
1003			result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
1004	}
1005
1006	dma_unmap_sg(rxd->ddev, req->src, sglen, dir_src);
1007	kfree(rxd->split_src_sg);
1008
1009	if (diff_dst) {
1010		sglen = sg_nents_for_len(req->dst, req->cryptlen);
1011
1012		dma_unmap_sg(rxd->ddev, req->dst, sglen,
1013			     DMA_FROM_DEVICE);
1014		kfree(rxd->split_dst_sg);
1015	}
1016
1017	kfree(rxd);
1018
1019	skcipher_request_complete(req, 0);
1020}
1021
1022static void
1023sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
1024{
1025	u32 *out, *in;
1026	int i;
1027
1028	for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
1029		*out++ = *in++;
1030
1031	mdptr[4] = (0xFFFF << 16);
1032	for (out = &mdptr[5], in = psdata, i = 0;
1033	     i < pslen / sizeof(u32); i++)
1034		*out++ = *in++;
1035}
1036
1037static int sa_run(struct sa_req *req)
1038{
1039	struct sa_rx_data *rxd;
1040	gfp_t gfp_flags;
1041	u32 cmdl[SA_MAX_CMDL_WORDS];
1042	struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
1043	struct device *ddev;
1044	struct dma_chan *dma_rx;
1045	int sg_nents, src_nents, dst_nents;
1046	int mapped_src_nents, mapped_dst_nents;
1047	struct scatterlist *src, *dst;
1048	size_t pl, ml, split_size;
1049	struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
1050	int ret;
1051	struct dma_async_tx_descriptor *tx_out;
1052	u32 *mdptr;
1053	bool diff_dst;
1054	enum dma_data_direction dir_src;
1055
1056	gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1057		GFP_KERNEL : GFP_ATOMIC;
1058
1059	rxd = kzalloc(sizeof(*rxd), gfp_flags);
1060	if (!rxd)
1061		return -ENOMEM;
1062
1063	if (req->src != req->dst) {
1064		diff_dst = true;
1065		dir_src = DMA_TO_DEVICE;
1066	} else {
1067		diff_dst = false;
1068		dir_src = DMA_BIDIRECTIONAL;
1069	}
1070
1071	/*
1072	 * SA2UL has an interesting feature where the receive DMA channel
1073	 * is selected based on the data passed to the engine. Within the
1074	 * transition range, there is also a space where it is impossible
1075	 * to determine where the data will end up, and this should be
1076	 * avoided. This will be handled by the SW fallback mechanism by
1077	 * the individual algorithm implementations.
1078	 */
1079	if (req->size >= 256)
1080		dma_rx = pdata->dma_rx2;
1081	else
1082		dma_rx = pdata->dma_rx1;
1083
1084	ddev = dma_rx->device->dev;
1085
1086	memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
1087
1088	sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
1089
1090	if (req->type != CRYPTO_ALG_TYPE_AHASH) {
1091		if (req->enc)
1092			req->type |=
1093				(SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
1094		else
1095			req->type |=
1096				(SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
1097	}
1098
1099	cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
1100
1101	/*
1102	 * Map the packets. First check whether the data fits into a single
1103	 * sg entry and use it directly if so. Otherwise, use sg_split to
1104	 * trim the scatterlist to the actual data size being processed by
1105	 * the crypto engine.
1106	 */
1107	src = req->src;
1108	sg_nents = sg_nents_for_len(src, req->size);
1109
1110	split_size = req->size;
1111
1112	if (sg_nents == 1 && split_size <= req->src->length) {
1113		src = &rxd->rx_sg;
1114		sg_init_table(src, 1);
1115		sg_set_page(src, sg_page(req->src), split_size,
1116			    req->src->offset);
1117		src_nents = 1;
1118		dma_map_sg(ddev, src, sg_nents, dir_src);
1119	} else {
1120		mapped_src_nents = dma_map_sg(ddev, req->src, sg_nents,
1121					      dir_src);
1122		ret = sg_split(req->src, mapped_src_nents, 0, 1, &split_size,
1123			       &src, &src_nents, gfp_flags);
1124		if (ret) {
1125			src_nents = sg_nents;
1126			src = req->src;
1127		} else {
1128			rxd->split_src_sg = src;
1129		}
1130	}
1131
1132	if (!diff_dst) {
1133		dst_nents = src_nents;
1134		dst = src;
1135	} else {
1136		dst_nents = sg_nents_for_len(req->dst, req->size);
1137
1138		if (dst_nents == 1 && split_size <= req->dst->length) {
1139			dst = &rxd->tx_sg;
1140			sg_init_table(dst, 1);
1141			sg_set_page(dst, sg_page(req->dst), split_size,
1142				    req->dst->offset);
1143			dst_nents = 1;
1144			dma_map_sg(ddev, dst, dst_nents, DMA_FROM_DEVICE);
1145		} else {
1146			mapped_dst_nents = dma_map_sg(ddev, req->dst, dst_nents,
1147						      DMA_FROM_DEVICE);
1148			ret = sg_split(req->dst, mapped_dst_nents, 0, 1,
1149				       &split_size, &dst, &dst_nents,
1150				       gfp_flags);
1151			if (ret) {
1152				/* dst_nents still holds the pre-split count */
1153				dst = req->dst;
1154			} else {
1155				rxd->split_dst_sg = dst;
1156			}
1157		}
1158	}
1159
1160	if (unlikely(src_nents != sg_nents)) {
1161		dev_warn_ratelimited(sa_k3_dev, "failed to map tx pkt\n");
1162		ret = -EIO;
1163		goto err_cleanup;
1164	}
1165
1166	rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
1167					     DMA_DEV_TO_MEM,
1168					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1169	if (!rxd->tx_in) {
1170		dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
1171		ret = -EINVAL;
1172		goto err_cleanup;
1173	}
1174
1175	rxd->req = (void *)req->base;
1176	rxd->enc = req->enc;
1177	rxd->ddev = ddev;
1178	rxd->src = src;
1179	rxd->dst = dst;
1180	rxd->iv_idx = req->ctx->iv_idx;
1181	rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
1182	rxd->tx_in->callback = req->callback;
1183	rxd->tx_in->callback_param = rxd;
1184
1185	tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
1186					 src_nents, DMA_MEM_TO_DEV,
1187					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1188
1189	if (!tx_out) {
1190		dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
1191		ret = -EINVAL;
1192		goto err_cleanup;
1193	}
1194
1195	/*
1196	 * Prepare the metadata for the DMA engine. This describes the crypto
1197	 * operation to be performed: the algorithm, data sizes, keys, etc.
1198	 */
1199	mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
1200
1201	sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
1202				   sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
1203			   sa_ctx->epib);
1204
1205	ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
1206	dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
1207
1208	dmaengine_submit(tx_out);
1209	dmaengine_submit(rxd->tx_in);
1210
1211	dma_async_issue_pending(dma_rx);
1212	dma_async_issue_pending(pdata->dma_tx);
1213
1214	return -EINPROGRESS;
1215
1216err_cleanup:
1217	dma_unmap_sg(ddev, req->src, sg_nents, dir_src);
1218	kfree(rxd->split_src_sg);
1219
1220	if (req->src != req->dst) {
1221		dst_nents = sg_nents_for_len(req->dst, req->size);
1222		dma_unmap_sg(ddev, req->dst, dst_nents, DMA_FROM_DEVICE);
1223		kfree(rxd->split_dst_sg);
1224	}
1225
1226	kfree(rxd);
1227
1228	return ret;
1229}
1230
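/*
 * Common skcipher entry point: requests that the hardware cannot handle
 * safely (larger than SA_MAX_DATA_SZ, or inside the window where the RX
 * channel selection is ambiguous) are handed to the software fallback
 * tfm; everything else is packed into a struct sa_req and submitted via
 * sa_run().
 */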
1231static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
1232{
1233	struct sa_tfm_ctx *ctx =
1234	    crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1235	struct crypto_alg *alg = req->base.tfm->__crt_alg;
1236	struct sa_req sa_req = { 0 };
1237	int ret;
1238
1239	if (!req->cryptlen)
1240		return 0;
1241
1242	if (req->cryptlen % alg->cra_blocksize)
1243		return -EINVAL;
1244
1245	/* Use SW fallback if the data size is not supported */
1246	if (req->cryptlen > SA_MAX_DATA_SZ ||
1247	    (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
1248	     req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
1249		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback.skcipher);
1250
1251		skcipher_request_set_sync_tfm(subreq, ctx->fallback.skcipher);
1252		skcipher_request_set_callback(subreq, req->base.flags,
1253					      NULL, NULL);
1254		skcipher_request_set_crypt(subreq, req->src, req->dst,
1255					   req->cryptlen, req->iv);
1256		if (enc)
1257			ret = crypto_skcipher_encrypt(subreq);
1258		else
1259			ret = crypto_skcipher_decrypt(subreq);
1260
1261		skcipher_request_zero(subreq);
1262		return ret;
1263	}
1264
1265	sa_req.size = req->cryptlen;
1266	sa_req.enc_size = req->cryptlen;
1267	sa_req.src = req->src;
1268	sa_req.dst = req->dst;
1269	sa_req.enc_iv = iv;
1270	sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
1271	sa_req.enc = enc;
1272	sa_req.callback = sa_aes_dma_in_callback;
1273	sa_req.mdata_size = 44;
1274	sa_req.base = &req->base;
1275	sa_req.ctx = ctx;
1276
1277	return sa_run(&sa_req);
1278}
1279
1280static int sa_encrypt(struct skcipher_request *req)
1281{
1282	return sa_cipher_run(req, req->iv, 1);
1283}
1284
1285static int sa_decrypt(struct skcipher_request *req)
1286{
1287	return sa_cipher_run(req, req->iv, 0);
1288}
1289
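/*
 * RX DMA completion handler for ahash requests: the engine returns the
 * digest in the descriptor metadata starting at word 4, which is
 * converted from big-endian into the caller's result buffer before the
 * source scatterlist is unmapped and the request is completed.
 */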
1290static void sa_sha_dma_in_callback(void *data)
1291{
1292	struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1293	struct ahash_request *req;
1294	struct crypto_ahash *tfm;
1295	unsigned int authsize;
1296	int i, sg_nents;
1297	size_t ml, pl;
1298	u32 *result;
1299	__be32 *mdptr;
1300
1301	req = container_of(rxd->req, struct ahash_request, base);
1302	tfm = crypto_ahash_reqtfm(req);
1303	authsize = crypto_ahash_digestsize(tfm);
1304
1305	mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1306	result = (u32 *)req->result;
1307
1308	for (i = 0; i < (authsize / 4); i++)
1309		result[i] = be32_to_cpu(mdptr[i + 4]);
1310
1311	sg_nents = sg_nents_for_len(req->src, req->nbytes);
1312	dma_unmap_sg(rxd->ddev, req->src, sg_nents, DMA_FROM_DEVICE);
1313
1314	kfree(rxd->split_src_sg);
1315
1316	kfree(rxd);
1317
1318	ahash_request_complete(req, 0);
1319}
1320
1321static int zero_message_process(struct ahash_request *req)
1322{
1323	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1324	int sa_digest_size = crypto_ahash_digestsize(tfm);
1325
1326	switch (sa_digest_size) {
1327	case SHA1_DIGEST_SIZE:
1328		memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
1329		break;
1330	case SHA256_DIGEST_SIZE:
1331		memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
1332		break;
1333	case SHA512_DIGEST_SIZE:
1334		memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
1335		break;
1336	default:
1337		return -EINVAL;
1338	}
1339
1340	return 0;
1341}
1342
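/*
 * Hash requests outside the sizes the engine handles safely are run
 * through the fallback ahash as an init/update/final sequence, and
 * zero-length messages short-circuit to the precomputed zero-message
 * digests above; everything else is submitted to the hardware via
 * sa_run().
 */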
1343static int sa_sha_run(struct ahash_request *req)
1344{
1345	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1346	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1347	struct sa_req sa_req = { 0 };
1348	size_t auth_len;
1349
1350	auth_len = req->nbytes;
1351
1352	if (!auth_len)
1353		return zero_message_process(req);
1354
1355	if (auth_len > SA_MAX_DATA_SZ ||
1356	    (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
1357	     auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
1358		struct ahash_request *subreq = &rctx->fallback_req;
1359		int ret = 0;
1360
1361		ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1362		subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1363
1364		crypto_ahash_init(subreq);
1365
1366		subreq->nbytes = auth_len;
1367		subreq->src = req->src;
1368		subreq->result = req->result;
1369
1370		ret |= crypto_ahash_update(subreq);
1371
1372		subreq->nbytes = 0;
1373
1374		ret |= crypto_ahash_final(subreq);
1375
1376		return ret;
1377	}
1378
1379	sa_req.size = auth_len;
1380	sa_req.auth_size = auth_len;
1381	sa_req.src = req->src;
1382	sa_req.dst = req->src;
1383	sa_req.enc = true;
1384	sa_req.type = CRYPTO_ALG_TYPE_AHASH;
1385	sa_req.callback = sa_sha_dma_in_callback;
1386	sa_req.mdata_size = 28;
1387	sa_req.ctx = ctx;
1388	sa_req.base = &req->base;
1389
1390	return sa_run(&sa_req);
1391}
1392
1393static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
1394{
1395	int bs = crypto_shash_blocksize(ctx->shash);
1396	int cmdl_len;
1397	struct sa_cmdl_cfg cfg;
1398
1399	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1400	ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1401	ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1402
1403	memset(ctx->authkey, 0, bs);
1404	memset(&cfg, 0, sizeof(cfg));
1405	cfg.aalg = ad->aalg_id;
1406	cfg.enc_eng_id = ad->enc_eng.eng_id;
1407	cfg.auth_eng_id = ad->auth_eng.eng_id;
1408	cfg.iv_size = 0;
1409	cfg.akey = NULL;
1410	cfg.akey_len = 0;
1411
1412	/* Setup Encryption Security Context & Command label template */
1413	if (sa_init_sc(&ctx->enc, NULL, 0, NULL, 0, ad, 0,
1414		       &ctx->enc.epib[1]))
1415		goto badkey;
1416
1417	cmdl_len = sa_format_cmdl_gen(&cfg,
1418				      (u8 *)ctx->enc.cmdl,
1419				      &ctx->enc.cmdl_upd_info);
1420	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1421		goto badkey;
1422
1423	ctx->enc.cmdl_size = cmdl_len;
1424
1425	return 0;
1426
1427badkey:
1428	dev_err(sa_k3_dev, "%s: badkey\n", __func__);
1429	return -EINVAL;
1430}
1431
1432static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1433{
1434	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1435	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1436	int ret;
1437
1438	memset(ctx, 0, sizeof(*ctx));
1439	ctx->dev_data = data;
1440	ret = sa_init_ctx_info(&ctx->enc, data);
1441	if (ret)
1442		return ret;
1443
1444	if (alg_base) {
1445		ctx->shash = crypto_alloc_shash(alg_base, 0,
1446						CRYPTO_ALG_NEED_FALLBACK);
1447		if (IS_ERR(ctx->shash)) {
1448			dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1449				alg_base);
1450			return PTR_ERR(ctx->shash);
1451		}
1452		/* for fallback */
1453		ctx->fallback.ahash =
1454			crypto_alloc_ahash(alg_base, 0,
1455					   CRYPTO_ALG_NEED_FALLBACK);
1456		if (IS_ERR(ctx->fallback.ahash)) {
1457			dev_err(ctx->dev_data->dev,
1458				"Could not load fallback driver\n");
1459			return PTR_ERR(ctx->fallback.ahash);
1460		}
1461	}
1462
1463	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1464		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1465		ctx->dec.sc_id, &ctx->dec.sc_phys);
1466
1467	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1468				 sizeof(struct sa_sha_req_ctx) +
1469				 crypto_ahash_reqsize(ctx->fallback.ahash));
1470
1471	return 0;
1472}
1473
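/*
 * Only one-shot digest() requests go to the hardware. The incremental
 * init/update/final/finup operations and import/export below delegate
 * to the software fallback ahash.
 */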
1474static int sa_sha_digest(struct ahash_request *req)
1475{
1476	return sa_sha_run(req);
1477}
1478
1479static int sa_sha_init(struct ahash_request *req)
1480{
1481	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1482	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1483	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1484
1485	dev_dbg(sa_k3_dev, "init: digest size: %d, rctx=%llx\n",
1486		crypto_ahash_digestsize(tfm), (u64)rctx);
1487
1488	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1489	rctx->fallback_req.base.flags =
1490		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1491
1492	return crypto_ahash_init(&rctx->fallback_req);
1493}
1494
1495static int sa_sha_update(struct ahash_request *req)
1496{
1497	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1498	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1499	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1500
1501	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1502	rctx->fallback_req.base.flags =
1503		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1504	rctx->fallback_req.nbytes = req->nbytes;
1505	rctx->fallback_req.src = req->src;
1506
1507	return crypto_ahash_update(&rctx->fallback_req);
1508}
1509
1510static int sa_sha_final(struct ahash_request *req)
1511{
1512	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1513	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1514	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1515
1516	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1517	rctx->fallback_req.base.flags =
1518		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1519	rctx->fallback_req.result = req->result;
1520
1521	return crypto_ahash_final(&rctx->fallback_req);
1522}
1523
1524static int sa_sha_finup(struct ahash_request *req)
1525{
1526	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1527	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1528	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1529
1530	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1531	rctx->fallback_req.base.flags =
1532		req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1533
1534	rctx->fallback_req.nbytes = req->nbytes;
1535	rctx->fallback_req.src = req->src;
1536	rctx->fallback_req.result = req->result;
1537
1538	return crypto_ahash_finup(&rctx->fallback_req);
1539}
1540
1541static int sa_sha_import(struct ahash_request *req, const void *in)
1542{
1543	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1544	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1545	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1546
1547	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1548	rctx->fallback_req.base.flags = req->base.flags &
1549		CRYPTO_TFM_REQ_MAY_SLEEP;
1550
1551	return crypto_ahash_import(&rctx->fallback_req, in);
1552}
1553
1554static int sa_sha_export(struct ahash_request *req, void *out)
1555{
1556	struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1557	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1558	struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1559	struct ahash_request *subreq = &rctx->fallback_req;
1560
1561	ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1562	subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1563
1564	return crypto_ahash_export(subreq, out);
1565}
1566
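/*
 * Per-digest tfm init helpers: each binds the matching software
 * shash/ahash fallback and fills in the algo_data used by sa_sha_setup()
 * to build the authentication security context.
 */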
1567static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1568{
1569	struct algo_data ad = { 0 };
1570	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1571
1572	sa_sha_cra_init_alg(tfm, "sha1");
1573
1574	ad.aalg_id = SA_AALG_ID_SHA1;
1575	ad.hash_size = SHA1_DIGEST_SIZE;
1576	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1577
1578	sa_sha_setup(ctx, &ad);
1579
1580	return 0;
1581}
1582
1583static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1584{
1585	struct algo_data ad = { 0 };
1586	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1587
1588	sa_sha_cra_init_alg(tfm, "sha256");
1589
1590	ad.aalg_id = SA_AALG_ID_SHA2_256;
1591	ad.hash_size = SHA256_DIGEST_SIZE;
1592	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1593
1594	sa_sha_setup(ctx, &ad);
1595
1596	return 0;
1597}
1598
1599static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1600{
1601	struct algo_data ad = { 0 };
1602	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1603
1604	sa_sha_cra_init_alg(tfm, "sha512");
1605
1606	ad.aalg_id = SA_AALG_ID_SHA2_512;
1607	ad.hash_size = SHA512_DIGEST_SIZE;
1608	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
1609
1610	sa_sha_setup(ctx, &ad);
1611
1612	return 0;
1613}
1614
1615static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1616{
1617	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1618	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1619
1620	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1621		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1622		ctx->dec.sc_id, &ctx->dec.sc_phys);
1623
1624	if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1625		sa_free_ctx_info(&ctx->enc, data);
1626
1627	crypto_free_shash(ctx->shash);
1628	crypto_free_ahash(ctx->fallback.ahash);
1629}
1630
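/*
 * RX DMA completion handler for AEAD requests: the engine returns the
 * computed authentication tag in the descriptor metadata. For encryption
 * the tag is copied to the end of the destination buffer; for decryption
 * it is compared against the tag carried in the source data and -EBADMSG
 * is reported on a mismatch.
 */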
1631static void sa_aead_dma_in_callback(void *data)
1632{
1633	struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1634	struct aead_request *req;
1635	struct crypto_aead *tfm;
1636	unsigned int start;
1637	unsigned int authsize;
1638	u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1639	size_t pl, ml;
1640	int i, sglen;
1641	int err = 0;
1642	u16 auth_len;
1643	u32 *mdptr;
1644	bool diff_dst;
1645	enum dma_data_direction dir_src;
1646
1647	req = container_of(rxd->req, struct aead_request, base);
1648	tfm = crypto_aead_reqtfm(req);
1649	start = req->assoclen + req->cryptlen;
1650	authsize = crypto_aead_authsize(tfm);
1651
1652	diff_dst = (req->src != req->dst);
1653	dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
1654
1655	mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1656	for (i = 0; i < (authsize / 4); i++)
1657		mdptr[i + 4] = swab32(mdptr[i + 4]);
1658
1659	auth_len = req->assoclen + req->cryptlen;
1660	if (!rxd->enc)
1661		auth_len -= authsize;
1662
1663	sglen = sg_nents_for_len(rxd->src, auth_len);
1664	dma_unmap_sg(rxd->ddev, rxd->src, sglen, dir_src);
1665	kfree(rxd->split_src_sg);
1666
1667	if (diff_dst) {
1668		sglen = sg_nents_for_len(rxd->dst, auth_len);
1669		dma_unmap_sg(rxd->ddev, rxd->dst, sglen, DMA_FROM_DEVICE);
1670		kfree(rxd->split_dst_sg);
1671	}
1672
1673	if (rxd->enc) {
1674		scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1675					 1);
1676	} else {
1677		start -= authsize;
1678		scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1679					 0);
1680
1681		err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1682	}
1683
1684	kfree(rxd);
1685
1686	aead_request_complete(req, err);
1687}
1688
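/*
 * AEAD tfm init: allocate the shash used when precomputing the HMAC
 * ipad/opad values and the software authenc() fallback, then reserve the
 * encrypt and decrypt security contexts for this transform.
 */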
1689static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1690			    const char *fallback)
1691{
1692	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1693	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1694	int ret;
1695
1696	memzero_explicit(ctx, sizeof(*ctx));
1697
1698	ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1699	if (IS_ERR(ctx->shash)) {
1700		dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1701		return PTR_ERR(ctx->shash);
1702	}
1703
1704	ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
1705					       CRYPTO_ALG_NEED_FALLBACK);
1706
1707	if (IS_ERR(ctx->fallback.aead)) {
1708		dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
1709			fallback);
1710		return PTR_ERR(ctx->fallback.aead);
1711	}
1712
1713	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1714				crypto_aead_reqsize(ctx->fallback.aead));
1715
1716	ret = sa_init_ctx_info(&ctx->enc, data);
1717	if (ret)
1718		return ret;
1719
1720	ret = sa_init_ctx_info(&ctx->dec, data);
1721	if (ret) {
1722		sa_free_ctx_info(&ctx->enc, data);
1723		return ret;
1724	}
1725
1726	dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1727		__func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1728		ctx->dec.sc_id, &ctx->dec.sc_phys);
1729
1730	return ret;
1731}
1732
1733static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1734{
1735	return sa_cra_init_aead(tfm, "sha1",
1736				"authenc(hmac(sha1-ce),cbc(aes-ce))");
1737}
1738
1739static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1740{
1741	return sa_cra_init_aead(tfm, "sha256",
1742				"authenc(hmac(sha256-ce),cbc(aes-ce))");
1743}
1744
1745static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1746{
1747	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1748	struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1749
1750	crypto_free_shash(ctx->shash);
1751	crypto_free_aead(ctx->fallback.aead);
1752
1753	sa_free_ctx_info(&ctx->enc, data);
1754	sa_free_ctx_info(&ctx->dec, data);
1755}
1756
1757/* AEAD algorithm configuration interface function */
1758static int sa_aead_setkey(struct crypto_aead *authenc,
1759			  const u8 *key, unsigned int keylen,
1760			  struct algo_data *ad)
1761{
1762	struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1763	struct crypto_authenc_keys keys;
1764	int cmdl_len;
1765	struct sa_cmdl_cfg cfg;
1766	int key_idx;
1767
1768	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1769		return -EINVAL;
1770
1771	/* Convert the key size (16/24/32) to the key size index (0/1/2) */
1772	key_idx = (keys.enckeylen >> 3) - 2;
1773	if (key_idx >= 3)
1774		return -EINVAL;
1775
1776	ad->ctx = ctx;
1777	ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1778	ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1779	ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1780	ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1781	ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1782	ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1783	ad->inv_key = true;
1784	ad->keyed_mac = true;
1785	ad->ealg_id = SA_EALG_ID_AES_CBC;
1786	ad->prep_iopad = sa_prepare_iopads;
1787
1788	memset(&cfg, 0, sizeof(cfg));
1789	cfg.enc = true;
1790	cfg.aalg = ad->aalg_id;
1791	cfg.enc_eng_id = ad->enc_eng.eng_id;
1792	cfg.auth_eng_id = ad->auth_eng.eng_id;
1793	cfg.iv_size = crypto_aead_ivsize(authenc);
1794	cfg.akey = keys.authkey;
1795	cfg.akey_len = keys.authkeylen;
1796
1797	/* Setup Encryption Security Context & Command label template */
1798	if (sa_init_sc(&ctx->enc, keys.enckey, keys.enckeylen,
1799		       keys.authkey, keys.authkeylen,
1800		       ad, 1, &ctx->enc.epib[1]))
1801		return -EINVAL;
1802
1803	cmdl_len = sa_format_cmdl_gen(&cfg,
1804				      (u8 *)ctx->enc.cmdl,
1805				      &ctx->enc.cmdl_upd_info);
1806	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1807		return -EINVAL;
1808
1809	ctx->enc.cmdl_size = cmdl_len;
1810
1811	/* Setup Decryption Security Context & Command label template */
1812	if (sa_init_sc(&ctx->dec, keys.enckey, keys.enckeylen,
1813		       keys.authkey, keys.authkeylen,
1814		       ad, 0, &ctx->dec.epib[1]))
1815		return -EINVAL;
1816
1817	cfg.enc = false;
1818	cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1819				      &ctx->dec.cmdl_upd_info);
1820
1821	if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1822		return -EINVAL;
1823
1824	ctx->dec.cmdl_size = cmdl_len;
1825
1826	crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1827	crypto_aead_set_flags(ctx->fallback.aead,
1828			      crypto_aead_get_flags(authenc) &
1829			      CRYPTO_TFM_REQ_MASK);
1830	crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1831
1832	return 0;
1833}
1834
1835static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1836{
1837	struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1838
1839	return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1840}
1841
1842static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1843				   const u8 *key, unsigned int keylen)
1844{
1845	struct algo_data ad = { 0 };
1846
1847	ad.ealg_id = SA_EALG_ID_AES_CBC;
1848	ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1849	ad.hash_size = SHA1_DIGEST_SIZE;
1850	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1851
1852	return sa_aead_setkey(authenc, key, keylen, &ad);
1853}
1854
1855static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1856				     const u8 *key, unsigned int keylen)
1857{
1858	struct algo_data ad = { 0 };
1859
1860	ad.ealg_id = SA_EALG_ID_AES_CBC;
1861	ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1862	ad.hash_size = SHA256_DIGEST_SIZE;
1863	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1864
1865	return sa_aead_setkey(authenc, key, keylen, &ad);
1866}
1867
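/*
 * Common AEAD entry point: the associated data and payload form one
 * contiguous authentication region, with encryption starting at
 * req->assoclen. Sizes the engine cannot handle safely fall back to the
 * software authenc() implementation.
 */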
1868static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1869{
1870	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1871	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1872	struct sa_req sa_req = { 0 };
1873	size_t auth_size, enc_size;
1874
1875	enc_size = req->cryptlen;
1876	auth_size = req->assoclen + req->cryptlen;
1877
1878	if (!enc) {
1879		enc_size -= crypto_aead_authsize(tfm);
1880		auth_size -= crypto_aead_authsize(tfm);
1881	}
1882
1883	if (auth_size > SA_MAX_DATA_SZ ||
1884	    (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1885	     auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1886		struct aead_request *subreq = aead_request_ctx(req);
1887		int ret;
1888
1889		aead_request_set_tfm(subreq, ctx->fallback.aead);
1890		aead_request_set_callback(subreq, req->base.flags,
1891					  req->base.complete, req->base.data);
1892		aead_request_set_crypt(subreq, req->src, req->dst,
1893				       req->cryptlen, req->iv);
1894		aead_request_set_ad(subreq, req->assoclen);
1895
1896		ret = enc ? crypto_aead_encrypt(subreq) :
1897			crypto_aead_decrypt(subreq);
1898		return ret;
1899	}
1900
1901	sa_req.enc_offset = req->assoclen;
1902	sa_req.enc_size = enc_size;
1903	sa_req.auth_size = auth_size;
1904	sa_req.size = auth_size;
1905	sa_req.enc_iv = iv;
1906	sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1907	sa_req.enc = enc;
1908	sa_req.callback = sa_aead_dma_in_callback;
1909	sa_req.mdata_size = 52;
1910	sa_req.base = &req->base;
1911	sa_req.ctx = ctx;
1912	sa_req.src = req->src;
1913	sa_req.dst = req->dst;
1914
1915	return sa_run(&sa_req);
1916}
1917
1918/* AEAD algorithm encrypt interface function */
1919static int sa_aead_encrypt(struct aead_request *req)
1920{
1921	return sa_aead_run(req, req->iv, 1);
1922}
1923
1924/* AEAD algorithm decrypt interface function */
1925static int sa_aead_decrypt(struct aead_request *req)
1926{
1927	return sa_aead_run(req, req->iv, 0);
1928}
1929
1930static struct sa_alg_tmpl sa_algs[] = {
1931	{
1932		.type = CRYPTO_ALG_TYPE_SKCIPHER,
1933		.alg.skcipher = {
1934			.base.cra_name		= "cbc(aes)",
1935			.base.cra_driver_name	= "cbc-aes-sa2ul",
1936			.base.cra_priority	= 30000,
1937			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
1938						  CRYPTO_ALG_KERN_DRIVER_ONLY |
1939						  CRYPTO_ALG_ASYNC |
1940						  CRYPTO_ALG_NEED_FALLBACK,
1941			.base.cra_blocksize	= AES_BLOCK_SIZE,
1942			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
1943			.base.cra_module	= THIS_MODULE,
1944			.init			= sa_cipher_cra_init,
1945			.exit			= sa_cipher_cra_exit,
1946			.min_keysize		= AES_MIN_KEY_SIZE,
1947			.max_keysize		= AES_MAX_KEY_SIZE,
1948			.ivsize			= AES_BLOCK_SIZE,
1949			.setkey			= sa_aes_cbc_setkey,
1950			.encrypt		= sa_encrypt,
1951			.decrypt		= sa_decrypt,
1952		}
1953	},
1954	{
1955		.type = CRYPTO_ALG_TYPE_SKCIPHER,
1956		.alg.skcipher = {
1957			.base.cra_name		= "ecb(aes)",
1958			.base.cra_driver_name	= "ecb-aes-sa2ul",
1959			.base.cra_priority	= 30000,
1960			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
1961						  CRYPTO_ALG_KERN_DRIVER_ONLY |
1962						  CRYPTO_ALG_ASYNC |
1963						  CRYPTO_ALG_NEED_FALLBACK,
1964			.base.cra_blocksize	= AES_BLOCK_SIZE,
1965			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
1966			.base.cra_module	= THIS_MODULE,
1967			.init			= sa_cipher_cra_init,
1968			.exit			= sa_cipher_cra_exit,
1969			.min_keysize		= AES_MIN_KEY_SIZE,
1970			.max_keysize		= AES_MAX_KEY_SIZE,
1971			.setkey			= sa_aes_ecb_setkey,
1972			.encrypt		= sa_encrypt,
1973			.decrypt		= sa_decrypt,
1974		}
1975	},
1976	{
1977		.type = CRYPTO_ALG_TYPE_SKCIPHER,
1978		.alg.skcipher = {
1979			.base.cra_name		= "cbc(des3_ede)",
1980			.base.cra_driver_name	= "cbc-des3-sa2ul",
1981			.base.cra_priority	= 30000,
1982			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
1983						  CRYPTO_ALG_KERN_DRIVER_ONLY |
1984						  CRYPTO_ALG_ASYNC |
1985						  CRYPTO_ALG_NEED_FALLBACK,
1986			.base.cra_blocksize	= DES_BLOCK_SIZE,
1987			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
1988			.base.cra_module	= THIS_MODULE,
1989			.init			= sa_cipher_cra_init,
1990			.exit			= sa_cipher_cra_exit,
1991			.min_keysize		= 3 * DES_KEY_SIZE,
1992			.max_keysize		= 3 * DES_KEY_SIZE,
1993			.ivsize			= DES_BLOCK_SIZE,
1994			.setkey			= sa_3des_cbc_setkey,
1995			.encrypt		= sa_encrypt,
1996			.decrypt		= sa_decrypt,
1997		}
1998	},
1999	{
2000		.type = CRYPTO_ALG_TYPE_SKCIPHER,
2001		.alg.skcipher = {
2002			.base.cra_name		= "ecb(des3_ede)",
2003			.base.cra_driver_name	= "ecb-des3-sa2ul",
2004			.base.cra_priority	= 30000,
2005			.base.cra_flags		= CRYPTO_ALG_TYPE_SKCIPHER |
2006						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2007						  CRYPTO_ALG_ASYNC |
2008						  CRYPTO_ALG_NEED_FALLBACK,
2009			.base.cra_blocksize	= DES_BLOCK_SIZE,
2010			.base.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2011			.base.cra_module	= THIS_MODULE,
2012			.init			= sa_cipher_cra_init,
2013			.exit			= sa_cipher_cra_exit,
2014			.min_keysize		= 3 * DES_KEY_SIZE,
2015			.max_keysize		= 3 * DES_KEY_SIZE,
2016			.setkey			= sa_3des_ecb_setkey,
2017			.encrypt		= sa_encrypt,
2018			.decrypt		= sa_decrypt,
2019		}
2020	},
2021	{
2022		.type = CRYPTO_ALG_TYPE_AHASH,
2023		.alg.ahash = {
2024			.halg.base = {
2025				.cra_name	= "sha1",
2026				.cra_driver_name	= "sha1-sa2ul",
2027				.cra_priority	= 400,
2028				.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
2029						  CRYPTO_ALG_ASYNC |
2030						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2031						  CRYPTO_ALG_NEED_FALLBACK,
2032				.cra_blocksize	= SHA1_BLOCK_SIZE,
2033				.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2034				.cra_module	= THIS_MODULE,
2035				.cra_init	= sa_sha1_cra_init,
2036				.cra_exit	= sa_sha_cra_exit,
2037			},
2038			.halg.digestsize	= SHA1_DIGEST_SIZE,
2039			.halg.statesize		= sizeof(struct sa_sha_req_ctx) +
2040						  sizeof(struct sha1_state),
2041			.init			= sa_sha_init,
2042			.update			= sa_sha_update,
2043			.final			= sa_sha_final,
2044			.finup			= sa_sha_finup,
2045			.digest			= sa_sha_digest,
2046			.export			= sa_sha_export,
2047			.import			= sa_sha_import,
2048		},
2049	},
2050	{
2051		.type = CRYPTO_ALG_TYPE_AHASH,
2052		.alg.ahash = {
2053			.halg.base = {
2054				.cra_name	= "sha256",
2055				.cra_driver_name	= "sha256-sa2ul",
2056				.cra_priority	= 400,
2057				.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
2058						  CRYPTO_ALG_ASYNC |
2059						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2060						  CRYPTO_ALG_NEED_FALLBACK,
2061				.cra_blocksize	= SHA256_BLOCK_SIZE,
2062				.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2063				.cra_module	= THIS_MODULE,
2064				.cra_init	= sa_sha256_cra_init,
2065				.cra_exit	= sa_sha_cra_exit,
2066			},
2067			.halg.digestsize	= SHA256_DIGEST_SIZE,
2068			.halg.statesize		= sizeof(struct sa_sha_req_ctx) +
2069						  sizeof(struct sha256_state),
2070			.init			= sa_sha_init,
2071			.update			= sa_sha_update,
2072			.final			= sa_sha_final,
2073			.finup			= sa_sha_finup,
2074			.digest			= sa_sha_digest,
2075			.export			= sa_sha_export,
2076			.import			= sa_sha_import,
2077		},
2078	},
2079	{
2080		.type = CRYPTO_ALG_TYPE_AHASH,
2081		.alg.ahash = {
2082			.halg.base = {
2083				.cra_name	= "sha512",
2084				.cra_driver_name	= "sha512-sa2ul",
2085				.cra_priority	= 400,
2086				.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
2087						  CRYPTO_ALG_ASYNC |
2088						  CRYPTO_ALG_KERN_DRIVER_ONLY |
2089						  CRYPTO_ALG_NEED_FALLBACK,
2090				.cra_blocksize	= SHA512_BLOCK_SIZE,
2091				.cra_ctxsize	= sizeof(struct sa_tfm_ctx),
2092				.cra_module	= THIS_MODULE,
2093				.cra_init	= sa_sha512_cra_init,
2094				.cra_exit	= sa_sha_cra_exit,
2095			},
2096			.halg.digestsize	= SHA512_DIGEST_SIZE,
2097			.halg.statesize		= sizeof(struct sa_sha_req_ctx) +
2098						  sizeof(struct sha512_state),
2099			.init			= sa_sha_init,
2100			.update			= sa_sha_update,
2101			.final			= sa_sha_final,
2102			.finup			= sa_sha_finup,
2103			.digest			= sa_sha_digest,
2104			.export			= sa_sha_export,
2105			.import			= sa_sha_import,
2106		},
2107	},
2108	{
2109		.type	= CRYPTO_ALG_TYPE_AEAD,
2110		.alg.aead = {
2111			.base = {
2112				.cra_name = "authenc(hmac(sha1),cbc(aes))",
2113				.cra_driver_name =
2114					"authenc(hmac(sha1),cbc(aes))-sa2ul",
2115				.cra_blocksize = AES_BLOCK_SIZE,
2116				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
2117					CRYPTO_ALG_KERN_DRIVER_ONLY |
2118					CRYPTO_ALG_ASYNC |
2119					CRYPTO_ALG_NEED_FALLBACK,
2120				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2121				.cra_module = THIS_MODULE,
2122				.cra_priority = 3000,
2123			},
2124			.ivsize = AES_BLOCK_SIZE,
2125			.maxauthsize = SHA1_DIGEST_SIZE,
2126
2127			.init = sa_cra_init_aead_sha1,
2128			.exit = sa_exit_tfm_aead,
2129			.setkey = sa_aead_cbc_sha1_setkey,
2130			.setauthsize = sa_aead_setauthsize,
2131			.encrypt = sa_aead_encrypt,
2132			.decrypt = sa_aead_decrypt,
2133		},
2134	},
2135	{
2136		.type	= CRYPTO_ALG_TYPE_AEAD,
2137		.alg.aead = {
2138			.base = {
2139				.cra_name = "authenc(hmac(sha256),cbc(aes))",
2140				.cra_driver_name =
2141					"authenc(hmac(sha256),cbc(aes))-sa2ul",
2142				.cra_blocksize = AES_BLOCK_SIZE,
2143				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
2144					CRYPTO_ALG_KERN_DRIVER_ONLY |
2145					CRYPTO_ALG_ASYNC |
2146					CRYPTO_ALG_NEED_FALLBACK,
2147				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2148				.cra_module = THIS_MODULE,
2149				.cra_alignmask = 0,
2150				.cra_priority = 3000,
2151			},
2152			.ivsize = AES_BLOCK_SIZE,
2153			.maxauthsize = SHA256_DIGEST_SIZE,
2154
2155			.init = sa_cra_init_aead_sha256,
2156			.exit = sa_exit_tfm_aead,
2157			.setkey = sa_aead_cbc_sha256_setkey,
2158			.setauthsize = sa_aead_setauthsize,
2159			.encrypt = sa_aead_encrypt,
2160			.decrypt = sa_aead_decrypt,
2161		},
2162	},
2163};
2164
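/*
 * Once registered, the algorithms above are reachable through the normal
 * kernel crypto API. A minimal sketch of an in-kernel consumer (purely
 * illustrative; "key" and its length are assumptions, not part of this
 * driver):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	...
 *	crypto_free_skcipher(tfm);
 *
 * The cra_priority values above determine whether these implementations
 * are preferred over other providers of the same algorithm names.
 */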
2165/* Register the algorithms in crypto framework */
2166static void sa_register_algos(const struct device *dev)
2167{
2168	char *alg_name;
2169	u32 type;
2170	int i, err;
2171
2172	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2173		type = sa_algs[i].type;
2174		if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2175			alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2176			err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2177		} else if (type == CRYPTO_ALG_TYPE_AHASH) {
2178			alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2179			err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2180		} else if (type == CRYPTO_ALG_TYPE_AEAD) {
2181			alg_name = sa_algs[i].alg.aead.base.cra_name;
2182			err = crypto_register_aead(&sa_algs[i].alg.aead);
2183		} else {
2184			dev_err(dev,
2185				"unsupported crypto algorithm (%d)\n",
2186				sa_algs[i].type);
2187			continue;
2188		}
2189
2190		if (err)
2191			dev_err(dev, "Failed to register '%s'\n", alg_name);
2192		else
2193			sa_algs[i].registered = true;
2194	}
2195}
2196
2197/* Unregister the algorithms in crypto framework */
2198static void sa_unregister_algos(const struct device *dev)
2199{
2200	u32 type;
2201	int i;
2202
2203	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2204		type = sa_algs[i].type;
2205		if (!sa_algs[i].registered)
2206			continue;
2207		if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2208			crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2209		else if (type == CRYPTO_ALG_TYPE_AHASH)
2210			crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2211		else if (type == CRYPTO_ALG_TYPE_AEAD)
2212			crypto_unregister_aead(&sa_algs[i].alg.aead);
2213
2214		sa_algs[i].registered = false;
2215	}
2216}
2217
2218static int sa_init_mem(struct sa_crypto_data *dev_data)
2219{
2220	struct device *dev = &dev_data->pdev->dev;
2221	/* Setup dma pool for security context buffers */
2222	dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2223					    SA_CTX_MAX_SZ, 64, 0);
2224	if (!dev_data->sc_pool) {
2225		dev_err(dev, "Failed to create dma pool");
2226		return -ENOMEM;
2227	}
2228
2229	return 0;
2230}
2231
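/*
 * Set up the three DMA channels used by the driver: one TX channel
 * towards the engine and two RX channels back from it ("rx1" for small
 * requests, "rx2" for large ones, matching the channel selection in
 * sa_run()).
 */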
2232static int sa_dma_init(struct sa_crypto_data *dd)
2233{
2234	int ret;
2235	struct dma_slave_config cfg;
2236
2237	dd->dma_rx1 = NULL;
2238	dd->dma_tx = NULL;
2239	dd->dma_rx2 = NULL;
2240
2241	ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2242	if (ret)
2243		return ret;
2244
2245	dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2246	if (IS_ERR(dd->dma_rx1)) {
2247		if (PTR_ERR(dd->dma_rx1) != -EPROBE_DEFER)
2248			dev_err(dd->dev, "Unable to request rx1 DMA channel\n");
2249		return PTR_ERR(dd->dma_rx1);
2250	}
2251
2252	dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2253	if (IS_ERR(dd->dma_rx2)) {
2254		dma_release_channel(dd->dma_rx1);
2255		if (PTR_ERR(dd->dma_rx2) != -EPROBE_DEFER)
2256			dev_err(dd->dev, "Unable to request rx2 DMA channel\n");
2257		return PTR_ERR(dd->dma_rx2);
2258	}
2259
2260	dd->dma_tx = dma_request_chan(dd->dev, "tx");
2261	if (IS_ERR(dd->dma_tx)) {
2262		if (PTR_ERR(dd->dma_tx) != -EPROBE_DEFER)
2263			dev_err(dd->dev, "Unable to request tx DMA channel\n");
2264		ret = PTR_ERR(dd->dma_tx);
2265		goto err_dma_tx;
2266	}
2267
2268	memzero_explicit(&cfg, sizeof(cfg));
2269
2270	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2271	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2272	cfg.src_maxburst = 4;
2273	cfg.dst_maxburst = 4;
2274
2275	ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
2276	if (ret) {
2277		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2278			ret);
2279		return ret;
2280	}
2281
2282	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
2283	if (ret) {
2284		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2285			ret);
2286		return ret;
2287	}
2288
2289	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
2290	if (ret) {
2291		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
2292			ret);
2293		return ret;
2294	}
2295
2296	return 0;
2297
2298err_dma_tx:
2299	dma_release_channel(dd->dma_rx1);
2300	dma_release_channel(dd->dma_rx2);
2301
2302	return ret;
2303}
2304
2305static int sa_link_child(struct device *dev, void *data)
2306{
2307	struct device *parent = data;
2308
2309	device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2310
2311	return 0;
2312}
2313
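/*
 * Probe: create the security-context DMA pool, request the DMA channels,
 * enable the engine sub-blocks through the engine enable control
 * register, register the crypto algorithms and finally populate and link
 * any child devices (such as the TRNG) described under the SA2UL device
 * tree node.
 */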
2314static int sa_ul_probe(struct platform_device *pdev)
2315{
2316	struct device *dev = &pdev->dev;
2317	struct device_node *node = dev->of_node;
2318	struct resource *res;
2319	void __iomem *saul_base;
2320	struct sa_crypto_data *dev_data;
2321	u32 val;
2322	int ret;
2323
2324	dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2325	if (!dev_data)
2326		return -ENOMEM;
2327
2328	sa_k3_dev = dev;
2329	dev_data->dev = dev;
2330	dev_data->pdev = pdev;
2331	platform_set_drvdata(pdev, dev_data);
2332	dev_set_drvdata(sa_k3_dev, dev_data);
2333
2334	pm_runtime_enable(dev);
2335	ret = pm_runtime_get_sync(dev);
2336	if (ret) {
2337		dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
2338			ret);
2339		return ret;
2340	}
2341
2342	ret = sa_init_mem(dev_data);
	if (ret)
		goto disable_pm_runtime;
2343	ret = sa_dma_init(dev_data);
2344	if (ret)
2345		goto disable_pm_runtime;
2346
2347	spin_lock_init(&dev_data->scid_lock);
2348	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2349	saul_base = devm_ioremap_resource(dev, res);
2350
2351	dev_data->base = saul_base;
2352	val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2353	    SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2354	    SA_EEC_TRNG_EN;
2355
2356	writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2357
2358	sa_register_algos(dev);
2359
2360	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
2361	if (ret)
2362		goto release_dma;
2363
2364	device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
2365
2366	return 0;
2367
2368release_dma:
2369	sa_unregister_algos(&pdev->dev);
2370
2371	dma_release_channel(dev_data->dma_rx2);
2372	dma_release_channel(dev_data->dma_rx1);
2373	dma_release_channel(dev_data->dma_tx);
2374
2375	dma_pool_destroy(dev_data->sc_pool);
2376
2377disable_pm_runtime:
2378	pm_runtime_put_sync(&pdev->dev);
2379	pm_runtime_disable(&pdev->dev);
2380
2381	return ret;
2382}
2383
2384static int sa_ul_remove(struct platform_device *pdev)
2385{
2386	struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
2387
2388	sa_unregister_algos(&pdev->dev);
2389
2390	dma_release_channel(dev_data->dma_rx2);
2391	dma_release_channel(dev_data->dma_rx1);
2392	dma_release_channel(dev_data->dma_tx);
2393
2394	dma_pool_destroy(dev_data->sc_pool);
2395
2396	platform_set_drvdata(pdev, NULL);
2397
2398	pm_runtime_put_sync(&pdev->dev);
2399	pm_runtime_disable(&pdev->dev);
2400
2401	return 0;
2402}
2403
2404static const struct of_device_id of_match[] = {
2405	{.compatible = "ti,j721e-sa2ul",},
2406	{.compatible = "ti,am654-sa2ul",},
2407	{},
2408};
2409MODULE_DEVICE_TABLE(of, of_match);
2410
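/*
 * Matching device tree nodes use one of the compatible strings above and
 * provide a register window plus the "tx", "rx1" and "rx2" DMA channels
 * requested in sa_dma_init(). A rough sketch (addresses and PSI-L thread
 * IDs are illustrative only, not taken from a real board file):
 *
 *	crypto: crypto@4e00000 {
 *		compatible = "ti,j721e-sa2ul";
 *		reg = <0x0 0x4e00000 0x0 0x1200>;
 *		dmas = <&main_udmap 0xc000>,
 *		       <&main_udmap 0x4000>,
 *		       <&main_udmap 0x4001>;
 *		dma-names = "tx", "rx1", "rx2";
 *	};
 */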
2411static struct platform_driver sa_ul_driver = {
2412	.probe = sa_ul_probe,
2413	.remove = sa_ul_remove,
2414	.driver = {
2415		   .name = "saul-crypto",
2416		   .of_match_table = of_match,
2417		   },
2418};
2419module_platform_driver(sa_ul_driver);
2420MODULE_LICENSE("GPL v2");