1// SPDX-License-Identifier: GPL-2.0
2/*
3 * K3 SA2UL crypto accelerator driver
4 *
5 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Authors: Keerthy
8 * Vitaly Andrianov
9 * Tero Kristo
10 */
11#include <linux/bitfield.h>
12#include <linux/clk.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h>
15#include <linux/dmapool.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/of_platform.h>
20#include <linux/platform_device.h>
21#include <linux/pm_runtime.h>
22
23#include <crypto/aes.h>
24#include <crypto/authenc.h>
25#include <crypto/des.h>
26#include <crypto/internal/aead.h>
27#include <crypto/internal/hash.h>
28#include <crypto/internal/skcipher.h>
29#include <crypto/scatterwalk.h>
30#include <crypto/sha1.h>
31#include <crypto/sha2.h>
32
33#include "sa2ul.h"
34
35/* Byte offset for key in encryption security context */
36#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
37/* Byte offset for Aux-1 in encryption security context */
38#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
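/*
 * The arithmetic above follows sa_set_sc_enc(): a one-byte mode selector,
 * MODE_CONTROL_BYTES (27) of mode control instructions and four bytes this
 * driver leaves untouched, which puts the key at byte offset 32 and the
 * Aux-1 area right after the 32-byte key slot at offset 64.
 */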
39
40#define SA_CMDL_UPD_ENC 0x0001
41#define SA_CMDL_UPD_AUTH 0x0002
42#define SA_CMDL_UPD_ENC_IV 0x0004
43#define SA_CMDL_UPD_AUTH_IV 0x0008
44#define SA_CMDL_UPD_AUX_KEY 0x0010
45
46#define SA_AUTH_SUBKEY_LEN 16
47#define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
48#define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
49
50#define MODE_CONTROL_BYTES 27
51#define SA_HASH_PROCESSING 0
52#define SA_CRYPTO_PROCESSING 0
53#define SA_UPLOAD_HASH_TO_TLR BIT(6)
54
55#define SA_SW0_FLAGS_MASK 0xF0000
56#define SA_SW0_CMDL_INFO_MASK 0x1F00000
57#define SA_SW0_CMDL_PRESENT BIT(4)
58#define SA_SW0_ENG_ID_MASK 0x3E000000
59#define SA_SW0_DEST_INFO_PRESENT BIT(30)
60#define SA_SW2_EGRESS_LENGTH 0xFF000000
61#define SA_BASIC_HASH 0x10
62
63#define SHA256_DIGEST_WORDS 8
64/* Make 32-bit word from 4 bytes */
65#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
66 ((b2) << 8) | (b3))
67
68/* size of SCCTL structure in bytes */
69#define SA_SCCTL_SZ 16
70
71/* Max Authentication tag size */
72#define SA_MAX_AUTH_TAG_SZ 64
73
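/* Identifiers indexing the sa_algs[] template table below */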
74enum sa_algo_id {
75 SA_ALG_CBC_AES = 0,
76 SA_ALG_EBC_AES,
77 SA_ALG_CBC_DES3,
78 SA_ALG_ECB_DES3,
79 SA_ALG_SHA1,
80 SA_ALG_SHA256,
81 SA_ALG_SHA512,
82 SA_ALG_AUTHENC_SHA1_AES,
83 SA_ALG_AUTHENC_SHA256_AES,
84};
85
86struct sa_match_data {
87 u8 priv;
88 u8 priv_id;
89 u32 supported_algos;
90};
91
92static struct device *sa_k3_dev;
93
94/**
95 * struct sa_cmdl_cfg - Command label configuration descriptor
96 * @aalg: authentication algorithm ID
97 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
98 * @auth_eng_id: Authentication Engine ID
99 * @iv_size: Initialization Vector size
100 * @akey: Authentication key
101 * @akey_len: Authentication key length
102 * @enc: True, if this is an encode request
103 */
104struct sa_cmdl_cfg {
105 int aalg;
106 u8 enc_eng_id;
107 u8 auth_eng_id;
108 u8 iv_size;
109 const u8 *akey;
110 u16 akey_len;
111 bool enc;
112};
113
114/**
115 * struct algo_data - Crypto algorithm specific data
116 * @enc_eng: Encryption engine info structure
117 * @auth_eng: Authentication engine info structure
118 * @auth_ctrl: Authentication control word
119 * @hash_size: Size of digest
120 * @iv_idx: iv index in psdata
121 * @iv_out_size: iv out size
122 * @ealg_id: Encryption Algorithm ID
123 * @aalg_id: Authentication algorithm ID
124 * @mci_enc: Mode Control Instruction for Encryption algorithm
125 * @mci_dec: Mode Control Instruction for Decryption
126 * @inv_key: Whether the encryption algorithm demands key inversion
127 * @ctx: Pointer to the algorithm context
128 * @keyed_mac: Whether the authentication algorithm has key
129 * @prep_iopad: Function pointer to generate intermediate ipad/opad
130 */
131struct algo_data {
132 struct sa_eng_info enc_eng;
133 struct sa_eng_info auth_eng;
134 u8 auth_ctrl;
135 u8 hash_size;
136 u8 iv_idx;
137 u8 iv_out_size;
138 u8 ealg_id;
139 u8 aalg_id;
140 u8 *mci_enc;
141 u8 *mci_dec;
142 bool inv_key;
143 struct sa_tfm_ctx *ctx;
144 bool keyed_mac;
145 void (*prep_iopad)(struct algo_data *algo, const u8 *key,
146 u16 key_sz, __be32 *ipad, __be32 *opad);
147};
148
149/**
150 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
151 * @type: Type of the crypto algorithm.
152 * @alg: Union of crypto algorithm definitions.
153 * @registered: Flag indicating if the crypto algorithm is already registered
154 */
155struct sa_alg_tmpl {
156 u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
157 union {
158 struct skcipher_alg skcipher;
159 struct ahash_alg ahash;
160 struct aead_alg aead;
161 } alg;
162 bool registered;
163};
164
165/**
166 * struct sa_mapped_sg: scatterlist information for tx and rx
167 * @mapped: Set to true if the @sgt is mapped
168 * @dir: mapping direction used for @sgt
169 * @split_sg: Set if the sg is split and needs to be freed up
170 * @static_sg: Static scatterlist entry for overriding data
171 * @sgt: scatterlist table for DMA API use
172 */
173struct sa_mapped_sg {
174 bool mapped;
175 enum dma_data_direction dir;
176 struct scatterlist static_sg;
177 struct scatterlist *split_sg;
178 struct sg_table sgt;
179};
180/**
 181 * struct sa_rx_data: RX Packet miscellaneous data placeholder
182 * @req: crypto request data pointer
183 * @ddev: pointer to the DMA device
184 * @tx_in: dma_async_tx_descriptor pointer for rx channel
185 * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
186 * @enc: Flag indicating either encryption or decryption
187 * @enc_iv_size: Initialisation vector size
188 * @iv_idx: Initialisation vector index
189 */
190struct sa_rx_data {
191 void *req;
192 struct device *ddev;
193 struct dma_async_tx_descriptor *tx_in;
194 struct sa_mapped_sg mapped_sg[2];
195 u8 enc;
196 u8 enc_iv_size;
197 u8 iv_idx;
198};
199
200/**
201 * struct sa_req: SA request definition
202 * @dev: device for the request
 203 * @size: total data to be transmitted via DMA
204 * @enc_offset: offset of cipher data
205 * @enc_size: data to be passed to cipher engine
206 * @enc_iv: cipher IV
207 * @auth_offset: offset of the authentication data
208 * @auth_size: size of the authentication data
209 * @auth_iv: authentication IV
210 * @type: algorithm type for the request
211 * @cmdl: command label pointer
212 * @base: pointer to the base request
213 * @ctx: pointer to the algorithm context data
214 * @enc: true if this is an encode request
215 * @src: source data
216 * @dst: destination data
217 * @callback: DMA callback for the request
218 * @mdata_size: metadata size passed to DMA
219 */
220struct sa_req {
221 struct device *dev;
222 u16 size;
223 u8 enc_offset;
224 u16 enc_size;
225 u8 *enc_iv;
226 u8 auth_offset;
227 u16 auth_size;
228 u8 *auth_iv;
229 u32 type;
230 u32 *cmdl;
231 struct crypto_async_request *base;
232 struct sa_tfm_ctx *ctx;
233 bool enc;
234 struct scatterlist *src;
235 struct scatterlist *dst;
236 dma_async_tx_callback callback;
237 u16 mdata_size;
238};
239
240/*
241 * Mode Control Instructions for various Key lengths 128, 192, 256
242 * For CBC (Cipher Block Chaining) mode for encryption
243 */
244static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
245 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
248 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
251 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
254};
255
256/*
257 * Mode Control Instructions for various Key lengths 128, 192, 256
258 * For CBC (Cipher Block Chaining) mode for decryption
259 */
260static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
261 { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
264 { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
267 { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
270};
271
272/*
273 * Mode Control Instructions for various Key lengths 128, 192, 256
274 * For CBC (Cipher Block Chaining) mode for encryption
275 */
276static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
277 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
278 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
279 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
280 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
283 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
284 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
286};
287
288/*
289 * Mode Control Instructions for various Key lengths 128, 192, 256
290 * For CBC (Cipher Block Chaining) mode for decryption
291 */
292static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
293 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
296 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
299 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
302};
303
304/*
305 * Mode Control Instructions for various Key lengths 128, 192, 256
306 * For ECB (Electronic Code Book) mode for encryption
307 */
308static u8 mci_ecb_enc_array[3][MODE_CONTROL_BYTES] = {
309 { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
312 { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
315 { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
318};
319
320/*
321 * Mode Control Instructions for various Key lengths 128, 192, 256
322 * For ECB (Electronic Code Book) mode for decryption
323 */
324static u8 mci_ecb_dec_array[3][MODE_CONTROL_BYTES] = {
325 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
328 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
331 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
334};
335
336/*
337 * Mode Control Instructions for DES algorithm
338 * For CBC (Cipher Block Chaining) mode and ECB mode
339 * encryption and for decryption respectively
340 */
341static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
342 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
344 0x00, 0x00, 0x00,
345};
346
347static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
348 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
349 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
350 0x00, 0x00, 0x00,
351};
352
353static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
354 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
356 0x00, 0x00, 0x00,
357};
358
359static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
360 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
362 0x00, 0x00, 0x00,
363};
364
365/*
366 * Perform 16-byte (128-bit) swizzling.
367 * The SA2UL expects the security context in little-endian form and its
368 * bus width is 128 bits (16 bytes), so swap 16 bytes at a time from the
369 * higher to the lower address (e.g. in[0..15] becomes in[15..0]).
370 */
371static void sa_swiz_128(u8 *in, u16 len)
372{
373 u8 data[16];
374 int i, j;
375
376 for (i = 0; i < len; i += 16) {
377 memcpy(data, &in[i], 16);
378 for (j = 0; j < 16; j++)
379 in[i + j] = data[15 - j];
380 }
381}
382
383/* Prepare the ipad and opad from the key as per HMAC (RFC 2104), step 1 */
384static void prepare_kipad(u8 *k_ipad, const u8 *key, u16 key_sz)
385{
386 int i;
387
388 for (i = 0; i < key_sz; i++)
389 k_ipad[i] = key[i] ^ 0x36;
390
 391 /* Remaining key bytes are zero-padded: 0 ^ 0x36 == 0x36 */
392 for (; i < SHA1_BLOCK_SIZE; i++)
393 k_ipad[i] = 0x36;
394}
395
396static void prepare_kopad(u8 *k_opad, const u8 *key, u16 key_sz)
397{
398 int i;
399
400 for (i = 0; i < key_sz; i++)
401 k_opad[i] = key[i] ^ 0x5c;
402
 403 /* Remaining key bytes are zero-padded: 0 ^ 0x5c == 0x5c */
404 for (; i < SHA1_BLOCK_SIZE; i++)
405 k_opad[i] = 0x5c;
406}
407
408static void sa_export_shash(void *state, struct shash_desc *hash,
409 int digest_size, __be32 *out)
410{
411 struct sha1_state *sha1;
412 struct sha256_state *sha256;
413 u32 *result;
414
415 switch (digest_size) {
416 case SHA1_DIGEST_SIZE:
417 sha1 = state;
418 result = sha1->state;
419 break;
420 case SHA256_DIGEST_SIZE:
421 sha256 = state;
422 result = sha256->state;
423 break;
424 default:
425 dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
426 digest_size);
427 return;
428 }
429
430 crypto_shash_export(hash, state);
431
432 cpu_to_be32_array(out, result, digest_size / 4);
433}
434
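/*
 * Compute the intermediate HMAC digests (RFC 2104):
 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 * Only the first block of K ^ ipad and K ^ opad is hashed here; the
 * exported partial digests are what sa_set_sc_auth() places in the
 * authentication security context.
 */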
435static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
436 u16 key_sz, __be32 *ipad, __be32 *opad)
437{
438 SHASH_DESC_ON_STACK(shash, data->ctx->shash);
439 int block_size = crypto_shash_blocksize(data->ctx->shash);
440 int digest_size = crypto_shash_digestsize(data->ctx->shash);
441 union {
442 struct sha1_state sha1;
443 struct sha256_state sha256;
444 u8 k_pad[SHA1_BLOCK_SIZE];
445 } sha;
446
447 shash->tfm = data->ctx->shash;
448
449 prepare_kipad(sha.k_pad, key, key_sz);
450
451 crypto_shash_init(shash);
452 crypto_shash_update(shash, sha.k_pad, block_size);
453 sa_export_shash(&sha, shash, digest_size, ipad);
454
455 prepare_kopad(sha.k_pad, key, key_sz);
456
457 crypto_shash_init(shash);
458 crypto_shash_update(shash, sha.k_pad, block_size);
459
460 sa_export_shash(&sha, shash, digest_size, opad);
461
462 memzero_explicit(&sha, sizeof(sha));
463}
464
465/* Derive the inverse key used in AES-CBC decryption operation */
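/*
 * key_pos below selects the last round key(s) of the expanded encryption
 * key schedule in key_enc[]; that tail of the schedule is what the engine
 * consumes as the "inverse" key for CBC decryption.
 */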
466static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
467{
468 struct crypto_aes_ctx ctx;
469 int key_pos;
470
471 if (aes_expandkey(&ctx, key, key_sz)) {
472 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
473 return -EINVAL;
474 }
475
 476 /* Workaround to get the right inverse key for AES_KEYSIZE_192 keys */
477 if (key_sz == AES_KEYSIZE_192) {
478 ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
479 ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
480 }
481
 482 /* Based on the crypto_aes_expand_key() logic */
483 switch (key_sz) {
484 case AES_KEYSIZE_128:
485 case AES_KEYSIZE_192:
486 key_pos = key_sz + 24;
487 break;
488
489 case AES_KEYSIZE_256:
490 key_pos = key_sz + 24 - 4;
491 break;
492
493 default:
494 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
495 return -EINVAL;
496 }
497
498 memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
499 return 0;
500}
501
502/* Set Security context for the encryption engine */
503static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
504 u8 enc, u8 *sc_buf)
505{
506 const u8 *mci = NULL;
507
508 /* Set Encryption mode selector to crypto processing */
509 sc_buf[0] = SA_CRYPTO_PROCESSING;
510
511 if (enc)
512 mci = ad->mci_enc;
513 else
514 mci = ad->mci_dec;
515 /* Set the mode control instructions in security context */
516 if (mci)
517 memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
518
519 /* For AES-CBC decryption get the inverse key */
520 if (ad->inv_key && !enc) {
521 if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
522 return -EINVAL;
523 /* For all other cases: key is used */
524 } else {
525 memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
526 }
527
528 return 0;
529}
530
531/* Set Security context for the authentication engine */
532static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
533 u8 *sc_buf)
534{
535 __be32 *ipad = (void *)(sc_buf + 32);
536 __be32 *opad = (void *)(sc_buf + 64);
537
538 /* Set Authentication mode selector to hash processing */
539 sc_buf[0] = SA_HASH_PROCESSING;
540 /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
541 sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
542 sc_buf[1] |= ad->auth_ctrl;
543
544 /* Copy the keys or ipad/opad */
545 if (ad->keyed_mac)
546 ad->prep_iopad(ad, key, key_sz, ipad, opad);
547 else {
548 /* basic hash */
549 sc_buf[1] |= SA_BASIC_HASH;
550 }
551}
552
553static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
554{
555 int j;
556
557 for (j = 0; j < ((size16) ? 4 : 2); j++) {
558 *out = cpu_to_be32(*((u32 *)iv));
559 iv += 4;
560 out++;
561 }
562}
563
564/* Format general command label */
565static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
566 struct sa_cmdl_upd_info *upd_info)
567{
568 u8 enc_offset = 0, auth_offset = 0, total = 0;
569 u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
570 u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
571 u32 *word_ptr = (u32 *)cmdl;
572 int i;
573
574 /* Clear the command label */
575 memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
576
 577 /* Initialize the command update structure */
578 memzero_explicit(upd_info, sizeof(*upd_info));
579
580 if (cfg->enc_eng_id && cfg->auth_eng_id) {
581 if (cfg->enc) {
582 auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
583 enc_next_eng = cfg->auth_eng_id;
584
585 if (cfg->iv_size)
586 auth_offset += cfg->iv_size;
587 } else {
588 enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
589 auth_next_eng = cfg->enc_eng_id;
590 }
591 }
592
593 if (cfg->enc_eng_id) {
594 upd_info->flags |= SA_CMDL_UPD_ENC;
595 upd_info->enc_size.index = enc_offset >> 2;
596 upd_info->enc_offset.index = upd_info->enc_size.index + 1;
597 /* Encryption command label */
598 cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
599
600 /* Encryption modes requiring IV */
601 if (cfg->iv_size) {
602 upd_info->flags |= SA_CMDL_UPD_ENC_IV;
603 upd_info->enc_iv.index =
604 (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
605 upd_info->enc_iv.size = cfg->iv_size;
606
607 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
608 SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
609
610 cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
611 (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
612 total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
613 } else {
614 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
615 SA_CMDL_HEADER_SIZE_BYTES;
616 total += SA_CMDL_HEADER_SIZE_BYTES;
617 }
618 }
619
620 if (cfg->auth_eng_id) {
621 upd_info->flags |= SA_CMDL_UPD_AUTH;
622 upd_info->auth_size.index = auth_offset >> 2;
623 upd_info->auth_offset.index = upd_info->auth_size.index + 1;
624 cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
625 cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
626 SA_CMDL_HEADER_SIZE_BYTES;
627 total += SA_CMDL_HEADER_SIZE_BYTES;
628 }
629
630 total = roundup(total, 8);
631
632 for (i = 0; i < total / 4; i++)
633 word_ptr[i] = swab32(word_ptr[i]);
634
635 return total;
636}
637
638/* Update Command label */
639static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
640 struct sa_cmdl_upd_info *upd_info)
641{
642 int i = 0, j;
643
644 if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
645 cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
646 cmdl[upd_info->enc_size.index] |= req->enc_size;
647 cmdl[upd_info->enc_offset.index] &=
648 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
649 cmdl[upd_info->enc_offset.index] |=
650 FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
651 req->enc_offset);
652
653 if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
654 __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
655 u32 *enc_iv = (u32 *)req->enc_iv;
656
657 for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
658 data[j] = cpu_to_be32(*enc_iv);
659 enc_iv++;
660 }
661 }
662 }
663
664 if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
665 cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
666 cmdl[upd_info->auth_size.index] |= req->auth_size;
667 cmdl[upd_info->auth_offset.index] &=
668 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
669 cmdl[upd_info->auth_offset.index] |=
670 FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
671 req->auth_offset);
672 if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
673 sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
674 req->auth_iv,
675 (upd_info->auth_iv.size > 8));
676 }
677 if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
678 int offset = (req->auth_size & 0xF) ? 4 : 0;
679
680 memcpy(&cmdl[upd_info->aux_key_info.index],
681 &upd_info->aux_key[offset], 16);
682 }
683 }
684}
685
686/* Format SWINFO words to be sent to SA */
687static
688void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
689 u8 cmdl_present, u8 cmdl_offset, u8 flags,
690 u8 hash_size, u32 *swinfo)
691{
692 swinfo[0] = sc_id;
693 swinfo[0] |= FIELD_PREP(SA_SW0_FLAGS_MASK, flags);
694 if (likely(cmdl_present))
695 swinfo[0] |= FIELD_PREP(SA_SW0_CMDL_INFO_MASK,
696 cmdl_offset | SA_SW0_CMDL_PRESENT);
697 swinfo[0] |= FIELD_PREP(SA_SW0_ENG_ID_MASK, eng_id);
698
699 swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
700 swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
701 swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
702 swinfo[2] |= FIELD_PREP(SA_SW2_EGRESS_LENGTH, hash_size);
703}
704
705/* Dump the security context */
706static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
707{
708#ifdef DEBUG
709 dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
710 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
711 16, 1, buf, SA_CTX_MAX_SZ, false);
712#endif
713}
714
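/*
 * Initialize one direction's security context: fill in the SCCTL header,
 * the encryption and/or authentication engine contexts, swizzle the
 * buffer, hand ownership to CP_ACE and prepare the SW info words.
 */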
715static
716int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
717 const u8 *enc_key, u16 enc_key_sz,
718 const u8 *auth_key, u16 auth_key_sz,
719 struct algo_data *ad, u8 enc, u32 *swinfo)
720{
721 int enc_sc_offset = 0;
722 int auth_sc_offset = 0;
723 u8 *sc_buf = ctx->sc;
724 u16 sc_id = ctx->sc_id;
725 u8 first_engine = 0;
726
727 memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
728
729 if (ad->auth_eng.eng_id) {
730 if (enc)
731 first_engine = ad->enc_eng.eng_id;
732 else
733 first_engine = ad->auth_eng.eng_id;
734
735 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
736 auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
737 sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
738 if (!ad->hash_size)
739 return -EINVAL;
740 ad->hash_size = roundup(ad->hash_size, 8);
741
742 } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
743 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
744 first_engine = ad->enc_eng.eng_id;
745 sc_buf[1] = SA_SCCTL_FE_ENC;
746 ad->hash_size = ad->iv_out_size;
747 }
748
749 /* SCCTL Owner info: 0=host, 1=CP_ACE */
750 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
751 memcpy(&sc_buf[2], &sc_id, 2);
752 sc_buf[4] = 0x0;
753 sc_buf[5] = match_data->priv_id;
754 sc_buf[6] = match_data->priv;
755 sc_buf[7] = 0x0;
756
757 /* Prepare context for encryption engine */
758 if (ad->enc_eng.sc_size) {
759 if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
760 &sc_buf[enc_sc_offset]))
761 return -EINVAL;
762 }
763
764 /* Prepare context for authentication engine */
765 if (ad->auth_eng.sc_size)
766 sa_set_sc_auth(ad, auth_key, auth_key_sz,
767 &sc_buf[auth_sc_offset]);
768
769 /* Set the ownership of context to CP_ACE */
770 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
771
772 /* swizzle the security context */
773 sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
774
775 sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
776 SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
777
778 sa_dump_sc(sc_buf, ctx->sc_phys);
779
780 return 0;
781}
782
783/* Free the per direction context memory */
784static void sa_free_ctx_info(struct sa_ctx_info *ctx,
785 struct sa_crypto_data *data)
786{
787 unsigned long bn;
788
789 bn = ctx->sc_id - data->sc_id_start;
790 spin_lock(&data->scid_lock);
791 __clear_bit(bn, data->ctx_bm);
792 data->sc_id--;
793 spin_unlock(&data->scid_lock);
794
795 if (ctx->sc) {
796 dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
797 ctx->sc = NULL;
798 }
799}
800
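/* Allocate a security context ID and DMA pool buffer for one direction */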
801static int sa_init_ctx_info(struct sa_ctx_info *ctx,
802 struct sa_crypto_data *data)
803{
804 unsigned long bn;
805 int err;
806
807 spin_lock(&data->scid_lock);
808 bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
809 __set_bit(bn, data->ctx_bm);
810 data->sc_id++;
811 spin_unlock(&data->scid_lock);
812
813 ctx->sc_id = (u16)(data->sc_id_start + bn);
814
815 ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
816 if (!ctx->sc) {
817 dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
818 err = -ENOMEM;
819 goto scid_rollback;
820 }
821
822 return 0;
823
824scid_rollback:
825 spin_lock(&data->scid_lock);
826 __clear_bit(bn, data->ctx_bm);
827 data->sc_id--;
828 spin_unlock(&data->scid_lock);
829
830 return err;
831}
832
833static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
834{
835 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
836 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
837
838 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
839 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
840 ctx->dec.sc_id, &ctx->dec.sc_phys);
841
842 sa_free_ctx_info(&ctx->enc, data);
843 sa_free_ctx_info(&ctx->dec, data);
844
845 crypto_free_skcipher(ctx->fallback.skcipher);
846}
847
848static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
849{
850 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
851 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
852 const char *name = crypto_tfm_alg_name(&tfm->base);
853 struct crypto_skcipher *child;
854 int ret;
855
856 memzero_explicit(ctx, sizeof(*ctx));
857 ctx->dev_data = data;
858
859 ret = sa_init_ctx_info(&ctx->enc, data);
860 if (ret)
861 return ret;
862 ret = sa_init_ctx_info(&ctx->dec, data);
863 if (ret) {
864 sa_free_ctx_info(&ctx->enc, data);
865 return ret;
866 }
867
868 child = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
869
870 if (IS_ERR(child)) {
871 dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
872 return PTR_ERR(child);
873 }
874
875 ctx->fallback.skcipher = child;
876 crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
877 sizeof(struct skcipher_request));
878
879 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
880 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
881 ctx->dec.sc_id, &ctx->dec.sc_phys);
882 return 0;
883}
884
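/*
 * Common cipher setkey handling: build both the encryption and decryption
 * security contexts and their command label templates up front, so the
 * request path only patches sizes, offsets and the IV via sa_update_cmdl().
 */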
885static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
886 unsigned int keylen, struct algo_data *ad)
887{
888 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
889 struct crypto_skcipher *child = ctx->fallback.skcipher;
890 int cmdl_len;
891 struct sa_cmdl_cfg cfg;
892 int ret;
893
894 if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
895 keylen != AES_KEYSIZE_256)
896 return -EINVAL;
897
898 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
899 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
900
901 memzero_explicit(&cfg, sizeof(cfg));
902 cfg.enc_eng_id = ad->enc_eng.eng_id;
903 cfg.iv_size = crypto_skcipher_ivsize(tfm);
904
905 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
906 crypto_skcipher_set_flags(child, tfm->base.crt_flags &
907 CRYPTO_TFM_REQ_MASK);
908 ret = crypto_skcipher_setkey(child, key, keylen);
909 if (ret)
910 return ret;
911
912 /* Setup Encryption Security Context & Command label template */
913 if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
914 ad, 1, &ctx->enc.epib[1]))
915 goto badkey;
916
917 cmdl_len = sa_format_cmdl_gen(&cfg,
918 (u8 *)ctx->enc.cmdl,
919 &ctx->enc.cmdl_upd_info);
920 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
921 goto badkey;
922
923 ctx->enc.cmdl_size = cmdl_len;
924
925 /* Setup Decryption Security Context & Command label template */
926 if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
927 ad, 0, &ctx->dec.epib[1]))
928 goto badkey;
929
930 cfg.enc_eng_id = ad->enc_eng.eng_id;
931 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
932 &ctx->dec.cmdl_upd_info);
933
934 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
935 goto badkey;
936
937 ctx->dec.cmdl_size = cmdl_len;
938 ctx->iv_idx = ad->iv_idx;
939
940 return 0;
941
942badkey:
943 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
944 return -EINVAL;
945}
946
947static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
948 unsigned int keylen)
949{
950 struct algo_data ad = { 0 };
951 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
952 int key_idx = (keylen >> 3) - 2;
953
954 if (key_idx >= 3)
955 return -EINVAL;
956
957 ad.mci_enc = mci_cbc_enc_array[key_idx];
958 ad.mci_dec = mci_cbc_dec_array[key_idx];
959 ad.inv_key = true;
960 ad.ealg_id = SA_EALG_ID_AES_CBC;
961 ad.iv_idx = 4;
962 ad.iv_out_size = 16;
963
964 return sa_cipher_setkey(tfm, key, keylen, &ad);
965}
966
967static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
968 unsigned int keylen)
969{
970 struct algo_data ad = { 0 };
971 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
972 int key_idx = (keylen >> 3) - 2;
973
974 if (key_idx >= 3)
975 return -EINVAL;
976
977 ad.mci_enc = mci_ecb_enc_array[key_idx];
978 ad.mci_dec = mci_ecb_dec_array[key_idx];
979 ad.inv_key = true;
980 ad.ealg_id = SA_EALG_ID_AES_ECB;
981
982 return sa_cipher_setkey(tfm, key, keylen, &ad);
983}
984
985static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
986 unsigned int keylen)
987{
988 struct algo_data ad = { 0 };
989
990 ad.mci_enc = mci_cbc_3des_enc_array;
991 ad.mci_dec = mci_cbc_3des_dec_array;
992 ad.ealg_id = SA_EALG_ID_3DES_CBC;
993 ad.iv_idx = 6;
994 ad.iv_out_size = 8;
995
996 return sa_cipher_setkey(tfm, key, keylen, &ad);
997}
998
999static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
1000 unsigned int keylen)
1001{
1002 struct algo_data ad = { 0 };
1003
1004 ad.mci_enc = mci_ecb_3des_enc_array;
1005 ad.mci_dec = mci_ecb_3des_dec_array;
1006
1007 return sa_cipher_setkey(tfm, key, keylen, &ad);
1008}
1009
1010static void sa_sync_from_device(struct sa_rx_data *rxd)
1011{
1012 struct sg_table *sgt;
1013
1014 if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
1015 sgt = &rxd->mapped_sg[0].sgt;
1016 else
1017 sgt = &rxd->mapped_sg[1].sgt;
1018
1019 dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
1020}
1021
1022static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
1023{
1024 int i;
1025
1026 for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
1027 struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
1028
1029 if (mapped_sg->mapped) {
1030 dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
1031 mapped_sg->dir, 0);
1032 kfree(mapped_sg->split_sg);
1033 }
1034 }
1035
1036 kfree(rxd);
1037}
1038
1039static void sa_aes_dma_in_callback(void *data)
1040{
1041 struct sa_rx_data *rxd = data;
1042 struct skcipher_request *req;
1043 u32 *result;
1044 __be32 *mdptr;
1045 size_t ml, pl;
1046 int i;
1047
1048 sa_sync_from_device(rxd);
1049 req = container_of(rxd->req, struct skcipher_request, base);
1050
1051 if (req->iv) {
1052 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
1053 &ml);
1054 result = (u32 *)req->iv;
1055
1056 for (i = 0; i < (rxd->enc_iv_size / 4); i++)
1057 result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
1058 }
1059
1060 sa_free_sa_rx_data(rxd);
1061
1062 skcipher_request_complete(req, 0);
1063}
1064
1065static void
1066sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
1067{
1068 u32 *out, *in;
1069 int i;
1070
1071 for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
1072 *out++ = *in++;
1073
1074 mdptr[4] = (0xFFFF << 16);
1075 for (out = &mdptr[5], in = psdata, i = 0;
1076 i < pslen / sizeof(u32); i++)
1077 *out++ = *in++;
1078}
1079
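/*
 * Core request path: patch the command label, map the source/destination
 * scatterlists, and submit the TX and RX DMA descriptors that push the
 * data through SA2UL; completion is signalled from the RX DMA callback.
 */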
1080static int sa_run(struct sa_req *req)
1081{
1082 struct sa_rx_data *rxd;
1083 gfp_t gfp_flags;
1084 u32 cmdl[SA_MAX_CMDL_WORDS];
1085 struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
1086 struct device *ddev;
1087 struct dma_chan *dma_rx;
1088 int sg_nents, src_nents, dst_nents;
1089 struct scatterlist *src, *dst;
1090 size_t pl, ml, split_size;
1091 struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
1092 int ret;
1093 struct dma_async_tx_descriptor *tx_out;
1094 u32 *mdptr;
1095 bool diff_dst;
1096 enum dma_data_direction dir_src;
1097 struct sa_mapped_sg *mapped_sg;
1098
1099 gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1100 GFP_KERNEL : GFP_ATOMIC;
1101
1102 rxd = kzalloc(sizeof(*rxd), gfp_flags);
1103 if (!rxd)
1104 return -ENOMEM;
1105
1106 if (req->src != req->dst) {
1107 diff_dst = true;
1108 dir_src = DMA_TO_DEVICE;
1109 } else {
1110 diff_dst = false;
1111 dir_src = DMA_BIDIRECTIONAL;
1112 }
1113
1114 /*
1115 * SA2UL has an interesting feature where the receive DMA channel
 1116 * is selected based on the size of the data passed to it. Within the
1117 * transition range, there is also a space where it is impossible
1118 * to determine where the data will end up, and this should be
1119 * avoided. This will be handled by the SW fallback mechanism by
1120 * the individual algorithm implementations.
1121 */
1122 if (req->size >= 256)
1123 dma_rx = pdata->dma_rx2;
1124 else
1125 dma_rx = pdata->dma_rx1;
1126
1127 ddev = dmaengine_get_dma_device(pdata->dma_tx);
1128 rxd->ddev = ddev;
1129
1130 memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
1131
1132 sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
1133
1134 if (req->type != CRYPTO_ALG_TYPE_AHASH) {
1135 if (req->enc)
1136 req->type |=
1137 (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
1138 else
1139 req->type |=
1140 (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
1141 }
1142
1143 cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
1144
1145 /*
1146 * Map the packets, first we check if the data fits into a single
1147 * sg entry and use that if possible. If it does not fit, we check
1148 * if we need to do sg_split to align the scatterlist data on the
1149 * actual data size being processed by the crypto engine.
1150 */
1151 src = req->src;
1152 sg_nents = sg_nents_for_len(src, req->size);
1153
1154 split_size = req->size;
1155
1156 mapped_sg = &rxd->mapped_sg[0];
1157 if (sg_nents == 1 && split_size <= req->src->length) {
1158 src = &mapped_sg->static_sg;
1159 src_nents = 1;
1160 sg_init_table(src, 1);
1161 sg_set_page(src, sg_page(req->src), split_size,
1162 req->src->offset);
1163
1164 mapped_sg->sgt.sgl = src;
1165 mapped_sg->sgt.orig_nents = src_nents;
1166 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1167 if (ret) {
1168 kfree(rxd);
1169 return ret;
1170 }
1171
1172 mapped_sg->dir = dir_src;
1173 mapped_sg->mapped = true;
1174 } else {
1175 mapped_sg->sgt.sgl = req->src;
1176 mapped_sg->sgt.orig_nents = sg_nents;
1177 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1178 if (ret) {
1179 kfree(rxd);
1180 return ret;
1181 }
1182
1183 mapped_sg->dir = dir_src;
1184 mapped_sg->mapped = true;
1185
1186 ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
1187 &split_size, &src, &src_nents, gfp_flags);
1188 if (ret) {
1189 src_nents = mapped_sg->sgt.nents;
1190 src = mapped_sg->sgt.sgl;
1191 } else {
1192 mapped_sg->split_sg = src;
1193 }
1194 }
1195
1196 dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
1197
1198 if (!diff_dst) {
1199 dst_nents = src_nents;
1200 dst = src;
1201 } else {
1202 dst_nents = sg_nents_for_len(req->dst, req->size);
1203 mapped_sg = &rxd->mapped_sg[1];
1204
1205 if (dst_nents == 1 && split_size <= req->dst->length) {
1206 dst = &mapped_sg->static_sg;
1207 dst_nents = 1;
1208 sg_init_table(dst, 1);
1209 sg_set_page(dst, sg_page(req->dst), split_size,
1210 req->dst->offset);
1211
1212 mapped_sg->sgt.sgl = dst;
1213 mapped_sg->sgt.orig_nents = dst_nents;
1214 ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1215 DMA_FROM_DEVICE, 0);
1216 if (ret)
1217 goto err_cleanup;
1218
1219 mapped_sg->dir = DMA_FROM_DEVICE;
1220 mapped_sg->mapped = true;
1221 } else {
1222 mapped_sg->sgt.sgl = req->dst;
1223 mapped_sg->sgt.orig_nents = dst_nents;
1224 ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1225 DMA_FROM_DEVICE, 0);
1226 if (ret)
1227 goto err_cleanup;
1228
1229 mapped_sg->dir = DMA_FROM_DEVICE;
1230 mapped_sg->mapped = true;
1231
1232 ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
1233 0, 1, &split_size, &dst, &dst_nents,
1234 gfp_flags);
1235 if (ret) {
1236 dst_nents = mapped_sg->sgt.nents;
1237 dst = mapped_sg->sgt.sgl;
1238 } else {
1239 mapped_sg->split_sg = dst;
1240 }
1241 }
1242 }
1243
1244 rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
1245 DMA_DEV_TO_MEM,
1246 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1247 if (!rxd->tx_in) {
1248 dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
1249 ret = -EINVAL;
1250 goto err_cleanup;
1251 }
1252
1253 rxd->req = (void *)req->base;
1254 rxd->enc = req->enc;
1255 rxd->iv_idx = req->ctx->iv_idx;
1256 rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
1257 rxd->tx_in->callback = req->callback;
1258 rxd->tx_in->callback_param = rxd;
1259
1260 tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
1261 src_nents, DMA_MEM_TO_DEV,
1262 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1263
1264 if (!tx_out) {
1265 dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
1266 ret = -EINVAL;
1267 goto err_cleanup;
1268 }
1269
1270 /*
1271 * Prepare metadata for DMA engine. This essentially describes the
1272 * crypto algorithm to be used, data sizes, different keys etc.
1273 */
1274 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
1275
1276 sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
1277 sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
1278 sa_ctx->epib);
1279
1280 ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
1281 dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
1282
1283 dmaengine_submit(tx_out);
1284 dmaengine_submit(rxd->tx_in);
1285
1286 dma_async_issue_pending(dma_rx);
1287 dma_async_issue_pending(pdata->dma_tx);
1288
1289 return -EINPROGRESS;
1290
1291err_cleanup:
1292 sa_free_sa_rx_data(rxd);
1293
1294 return ret;
1295}
1296
1297static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
1298{
1299 struct sa_tfm_ctx *ctx =
1300 crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1301 struct crypto_alg *alg = req->base.tfm->__crt_alg;
1302 struct sa_req sa_req = { 0 };
1303
1304 if (!req->cryptlen)
1305 return 0;
1306
1307 if (req->cryptlen % alg->cra_blocksize)
1308 return -EINVAL;
1309
1310 /* Use SW fallback if the data size is not supported */
1311 if (req->cryptlen > SA_MAX_DATA_SZ ||
1312 (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
1313 req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
1314 struct skcipher_request *subreq = skcipher_request_ctx(req);
1315
1316 skcipher_request_set_tfm(subreq, ctx->fallback.skcipher);
1317 skcipher_request_set_callback(subreq, req->base.flags,
1318 req->base.complete,
1319 req->base.data);
1320 skcipher_request_set_crypt(subreq, req->src, req->dst,
1321 req->cryptlen, req->iv);
1322 if (enc)
1323 return crypto_skcipher_encrypt(subreq);
1324 else
1325 return crypto_skcipher_decrypt(subreq);
1326 }
1327
1328 sa_req.size = req->cryptlen;
1329 sa_req.enc_size = req->cryptlen;
1330 sa_req.src = req->src;
1331 sa_req.dst = req->dst;
1332 sa_req.enc_iv = iv;
1333 sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
1334 sa_req.enc = enc;
1335 sa_req.callback = sa_aes_dma_in_callback;
1336 sa_req.mdata_size = 44;
1337 sa_req.base = &req->base;
1338 sa_req.ctx = ctx;
1339
1340 return sa_run(&sa_req);
1341}
1342
1343static int sa_encrypt(struct skcipher_request *req)
1344{
1345 return sa_cipher_run(req, req->iv, 1);
1346}
1347
1348static int sa_decrypt(struct skcipher_request *req)
1349{
1350 return sa_cipher_run(req, req->iv, 0);
1351}
1352
1353static void sa_sha_dma_in_callback(void *data)
1354{
1355 struct sa_rx_data *rxd = data;
1356 struct ahash_request *req;
1357 struct crypto_ahash *tfm;
1358 unsigned int authsize;
1359 int i;
1360 size_t ml, pl;
1361 u32 *result;
1362 __be32 *mdptr;
1363
1364 sa_sync_from_device(rxd);
1365 req = container_of(rxd->req, struct ahash_request, base);
1366 tfm = crypto_ahash_reqtfm(req);
1367 authsize = crypto_ahash_digestsize(tfm);
1368
1369 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1370 result = (u32 *)req->result;
1371
1372 for (i = 0; i < (authsize / 4); i++)
1373 result[i] = be32_to_cpu(mdptr[i + 4]);
1374
1375 sa_free_sa_rx_data(rxd);
1376
1377 ahash_request_complete(req, 0);
1378}
1379
1380static int zero_message_process(struct ahash_request *req)
1381{
1382 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1383 int sa_digest_size = crypto_ahash_digestsize(tfm);
1384
1385 switch (sa_digest_size) {
1386 case SHA1_DIGEST_SIZE:
1387 memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
1388 break;
1389 case SHA256_DIGEST_SIZE:
1390 memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
1391 break;
1392 case SHA512_DIGEST_SIZE:
1393 memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
1394 break;
1395 default:
1396 return -EINVAL;
1397 }
1398
1399 return 0;
1400}
1401
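/*
 * One-shot hash through the hardware. Zero-length messages use the
 * precomputed digests above; sizes outside the supported range fall back
 * to the software ahash implementation.
 */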
1402static int sa_sha_run(struct ahash_request *req)
1403{
1404 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1405 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1406 struct sa_req sa_req = { 0 };
1407 size_t auth_len;
1408
1409 auth_len = req->nbytes;
1410
1411 if (!auth_len)
1412 return zero_message_process(req);
1413
1414 if (auth_len > SA_MAX_DATA_SZ ||
1415 (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
1416 auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
1417 struct ahash_request *subreq = &rctx->fallback_req;
1418 int ret = 0;
1419
1420 ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1421 subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1422
1423 crypto_ahash_init(subreq);
1424
1425 subreq->nbytes = auth_len;
1426 subreq->src = req->src;
1427 subreq->result = req->result;
1428
1429 ret |= crypto_ahash_update(subreq);
1430
1431 subreq->nbytes = 0;
1432
1433 ret |= crypto_ahash_final(subreq);
1434
1435 return ret;
1436 }
1437
1438 sa_req.size = auth_len;
1439 sa_req.auth_size = auth_len;
1440 sa_req.src = req->src;
1441 sa_req.dst = req->src;
1442 sa_req.enc = true;
1443 sa_req.type = CRYPTO_ALG_TYPE_AHASH;
1444 sa_req.callback = sa_sha_dma_in_callback;
1445 sa_req.mdata_size = 28;
1446 sa_req.ctx = ctx;
1447 sa_req.base = &req->base;
1448
1449 return sa_run(&sa_req);
1450}
1451
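/*
 * Build the hash security context and command label template that digest
 * requests on this transform share.
 */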
1452static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
1453{
1454 int bs = crypto_shash_blocksize(ctx->shash);
1455 int cmdl_len;
1456 struct sa_cmdl_cfg cfg;
1457
1458 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1459 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1460 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1461
1462 memset(ctx->authkey, 0, bs);
1463 memset(&cfg, 0, sizeof(cfg));
1464 cfg.aalg = ad->aalg_id;
1465 cfg.enc_eng_id = ad->enc_eng.eng_id;
1466 cfg.auth_eng_id = ad->auth_eng.eng_id;
1467 cfg.iv_size = 0;
1468 cfg.akey = NULL;
1469 cfg.akey_len = 0;
1470
1471 ctx->dev_data = dev_get_drvdata(sa_k3_dev);
1472 /* Setup Encryption Security Context & Command label template */
1473 if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
1474 ad, 0, &ctx->enc.epib[1]))
1475 goto badkey;
1476
1477 cmdl_len = sa_format_cmdl_gen(&cfg,
1478 (u8 *)ctx->enc.cmdl,
1479 &ctx->enc.cmdl_upd_info);
1480 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1481 goto badkey;
1482
1483 ctx->enc.cmdl_size = cmdl_len;
1484
1485 return 0;
1486
1487badkey:
1488 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
1489 return -EINVAL;
1490}
1491
1492static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1493{
1494 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1495 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1496 int ret;
1497
1498 memset(ctx, 0, sizeof(*ctx));
1499 ctx->dev_data = data;
1500 ret = sa_init_ctx_info(&ctx->enc, data);
1501 if (ret)
1502 return ret;
1503
1504 if (alg_base) {
1505 ctx->shash = crypto_alloc_shash(alg_base, 0,
1506 CRYPTO_ALG_NEED_FALLBACK);
1507 if (IS_ERR(ctx->shash)) {
1508 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1509 alg_base);
1510 return PTR_ERR(ctx->shash);
1511 }
1512 /* for fallback */
1513 ctx->fallback.ahash =
1514 crypto_alloc_ahash(alg_base, 0,
1515 CRYPTO_ALG_NEED_FALLBACK);
1516 if (IS_ERR(ctx->fallback.ahash)) {
1517 dev_err(ctx->dev_data->dev,
1518 "Could not load fallback driver\n");
1519 return PTR_ERR(ctx->fallback.ahash);
1520 }
1521 }
1522
1523 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1524 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1525 ctx->dec.sc_id, &ctx->dec.sc_phys);
1526
1527 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1528 sizeof(struct sa_sha_req_ctx) +
1529 crypto_ahash_reqsize(ctx->fallback.ahash));
1530
1531 return 0;
1532}
1533
1534static int sa_sha_digest(struct ahash_request *req)
1535{
1536 return sa_sha_run(req);
1537}
1538
1539static int sa_sha_init(struct ahash_request *req)
1540{
1541 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1542 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1543 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1544
1545 dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1546 crypto_ahash_digestsize(tfm), rctx);
1547
1548 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1549 rctx->fallback_req.base.flags =
1550 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1551
1552 return crypto_ahash_init(&rctx->fallback_req);
1553}
1554
1555static int sa_sha_update(struct ahash_request *req)
1556{
1557 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1558 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1559 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1560
1561 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1562 rctx->fallback_req.base.flags =
1563 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1564 rctx->fallback_req.nbytes = req->nbytes;
1565 rctx->fallback_req.src = req->src;
1566
1567 return crypto_ahash_update(&rctx->fallback_req);
1568}
1569
1570static int sa_sha_final(struct ahash_request *req)
1571{
1572 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1573 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1574 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1575
1576 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1577 rctx->fallback_req.base.flags =
1578 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1579 rctx->fallback_req.result = req->result;
1580
1581 return crypto_ahash_final(&rctx->fallback_req);
1582}
1583
1584static int sa_sha_finup(struct ahash_request *req)
1585{
1586 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1587 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1588 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1589
1590 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1591 rctx->fallback_req.base.flags =
1592 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1593
1594 rctx->fallback_req.nbytes = req->nbytes;
1595 rctx->fallback_req.src = req->src;
1596 rctx->fallback_req.result = req->result;
1597
1598 return crypto_ahash_finup(&rctx->fallback_req);
1599}
1600
1601static int sa_sha_import(struct ahash_request *req, const void *in)
1602{
1603 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1604 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1605 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1606
1607 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1608 rctx->fallback_req.base.flags = req->base.flags &
1609 CRYPTO_TFM_REQ_MAY_SLEEP;
1610
1611 return crypto_ahash_import(&rctx->fallback_req, in);
1612}
1613
1614static int sa_sha_export(struct ahash_request *req, void *out)
1615{
1616 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1617 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1618 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1619 struct ahash_request *subreq = &rctx->fallback_req;
1620
1621 ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1622 subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1623
1624 return crypto_ahash_export(subreq, out);
1625}
1626
1627static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1628{
1629 struct algo_data ad = { 0 };
1630 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1631
1632 sa_sha_cra_init_alg(tfm, "sha1");
1633
1634 ad.aalg_id = SA_AALG_ID_SHA1;
1635 ad.hash_size = SHA1_DIGEST_SIZE;
1636 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1637
1638 sa_sha_setup(ctx, &ad);
1639
1640 return 0;
1641}
1642
1643static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1644{
1645 struct algo_data ad = { 0 };
1646 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1647
1648 sa_sha_cra_init_alg(tfm, "sha256");
1649
1650 ad.aalg_id = SA_AALG_ID_SHA2_256;
1651 ad.hash_size = SHA256_DIGEST_SIZE;
1652 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1653
1654 sa_sha_setup(ctx, &ad);
1655
1656 return 0;
1657}
1658
1659static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1660{
1661 struct algo_data ad = { 0 };
1662 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1663
1664 sa_sha_cra_init_alg(tfm, "sha512");
1665
1666 ad.aalg_id = SA_AALG_ID_SHA2_512;
1667 ad.hash_size = SHA512_DIGEST_SIZE;
1668 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
1669
1670 sa_sha_setup(ctx, &ad);
1671
1672 return 0;
1673}
1674
1675static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1676{
1677 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1678 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1679
1680 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1681 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1682 ctx->dec.sc_id, &ctx->dec.sc_phys);
1683
1684 if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1685 sa_free_ctx_info(&ctx->enc, data);
1686
1687 crypto_free_shash(ctx->shash);
1688 crypto_free_ahash(ctx->fallback.ahash);
1689}
1690
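/*
 * RX DMA completion for AEAD: the computed tag arrives in the metadata
 * words. For encryption it is appended to the destination; for decryption
 * it is compared with the tag at the end of the source and -EBADMSG is
 * returned on a mismatch.
 */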
1691static void sa_aead_dma_in_callback(void *data)
1692{
1693 struct sa_rx_data *rxd = data;
1694 struct aead_request *req;
1695 struct crypto_aead *tfm;
1696 unsigned int start;
1697 unsigned int authsize;
1698 u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1699 size_t pl, ml;
1700 int i;
1701 int err = 0;
1702 u32 *mdptr;
1703
1704 sa_sync_from_device(rxd);
1705 req = container_of(rxd->req, struct aead_request, base);
1706 tfm = crypto_aead_reqtfm(req);
1707 start = req->assoclen + req->cryptlen;
1708 authsize = crypto_aead_authsize(tfm);
1709
1710 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1711 for (i = 0; i < (authsize / 4); i++)
1712 mdptr[i + 4] = swab32(mdptr[i + 4]);
1713
1714 if (rxd->enc) {
1715 scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1716 1);
1717 } else {
1718 start -= authsize;
1719 scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1720 0);
1721
1722 err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1723 }
1724
1725 sa_free_sa_rx_data(rxd);
1726
1727 aead_request_complete(req, err);
1728}
1729
1730static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1731 const char *fallback)
1732{
1733 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1734 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1735 int ret;
1736
1737 memzero_explicit(ctx, sizeof(*ctx));
1738 ctx->dev_data = data;
1739
1740 ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1741 if (IS_ERR(ctx->shash)) {
1742 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1743 return PTR_ERR(ctx->shash);
1744 }
1745
1746 ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
1747 CRYPTO_ALG_NEED_FALLBACK);
1748
1749 if (IS_ERR(ctx->fallback.aead)) {
1750 dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
1751 fallback);
1752 return PTR_ERR(ctx->fallback.aead);
1753 }
1754
1755 crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1756 crypto_aead_reqsize(ctx->fallback.aead));
1757
1758 ret = sa_init_ctx_info(&ctx->enc, data);
1759 if (ret)
1760 return ret;
1761
1762 ret = sa_init_ctx_info(&ctx->dec, data);
1763 if (ret) {
1764 sa_free_ctx_info(&ctx->enc, data);
1765 return ret;
1766 }
1767
1768 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1769 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1770 ctx->dec.sc_id, &ctx->dec.sc_phys);
1771
1772 return ret;
1773}
1774
1775static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1776{
1777 return sa_cra_init_aead(tfm, "sha1",
1778 "authenc(hmac(sha1-ce),cbc(aes-ce))");
1779}
1780
1781static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1782{
1783 return sa_cra_init_aead(tfm, "sha256",
1784 "authenc(hmac(sha256-ce),cbc(aes-ce))");
1785}
1786
1787static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1788{
1789 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1790 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1791
1792 crypto_free_shash(ctx->shash);
1793 crypto_free_aead(ctx->fallback.aead);
1794
1795 sa_free_ctx_info(&ctx->enc, data);
1796 sa_free_ctx_info(&ctx->dec, data);
1797}
1798
1799/* AEAD algorithm configuration interface function */
1800static int sa_aead_setkey(struct crypto_aead *authenc,
1801 const u8 *key, unsigned int keylen,
1802 struct algo_data *ad)
1803{
1804 struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1805 struct crypto_authenc_keys keys;
1806 int cmdl_len;
1807 struct sa_cmdl_cfg cfg;
1808 int key_idx;
1809
1810 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1811 return -EINVAL;
1812
1813 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
1814 key_idx = (keys.enckeylen >> 3) - 2;
1815 if (key_idx >= 3)
1816 return -EINVAL;
1817
1818 ad->ctx = ctx;
1819 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1820 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1821 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1822 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1823 ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1824 ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1825 ad->inv_key = true;
1826 ad->keyed_mac = true;
1827 ad->ealg_id = SA_EALG_ID_AES_CBC;
1828 ad->prep_iopad = sa_prepare_iopads;
1829
1830 memset(&cfg, 0, sizeof(cfg));
1831 cfg.enc = true;
1832 cfg.aalg = ad->aalg_id;
1833 cfg.enc_eng_id = ad->enc_eng.eng_id;
1834 cfg.auth_eng_id = ad->auth_eng.eng_id;
1835 cfg.iv_size = crypto_aead_ivsize(authenc);
1836 cfg.akey = keys.authkey;
1837 cfg.akey_len = keys.authkeylen;
1838
1839 /* Setup Encryption Security Context & Command label template */
1840 if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
1841 keys.enckeylen, keys.authkey, keys.authkeylen,
1842 ad, 1, &ctx->enc.epib[1]))
1843 return -EINVAL;
1844
1845 cmdl_len = sa_format_cmdl_gen(&cfg,
1846 (u8 *)ctx->enc.cmdl,
1847 &ctx->enc.cmdl_upd_info);
1848 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1849 return -EINVAL;
1850
1851 ctx->enc.cmdl_size = cmdl_len;
1852
1853 /* Setup Decryption Security Context & Command label template */
1854 if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
1855 keys.enckeylen, keys.authkey, keys.authkeylen,
1856 ad, 0, &ctx->dec.epib[1]))
1857 return -EINVAL;
1858
1859 cfg.enc = false;
1860 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1861 &ctx->dec.cmdl_upd_info);
1862
1863 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1864 return -EINVAL;
1865
1866 ctx->dec.cmdl_size = cmdl_len;
1867
1868 crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1869 crypto_aead_set_flags(ctx->fallback.aead,
1870 crypto_aead_get_flags(authenc) &
1871 CRYPTO_TFM_REQ_MASK);
1872
1873 return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1874}
1875
1876static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1877{
1878 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1879
1880 return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1881}
1882
static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
				   const u8 *key, unsigned int keylen)
{
	struct algo_data ad = { 0 };

	ad.ealg_id = SA_EALG_ID_AES_CBC;
	ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
	ad.hash_size = SHA1_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;

	return sa_aead_setkey(authenc, key, keylen, &ad);
}

static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
				     const u8 *key, unsigned int keylen)
{
	struct algo_data ad = { 0 };

	ad.ealg_id = SA_EALG_ID_AES_CBC;
	ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
	ad.hash_size = SHA256_DIGEST_SIZE;
	ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;

	return sa_aead_setkey(authenc, key, keylen, &ad);
}

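/*
 * Common AEAD handler: use the software fallback for sizes the engine
 * cannot handle safely, otherwise build an SA request and submit it via DMA.
 */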
static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
	struct sa_req sa_req = { 0 };
	size_t auth_size, enc_size;

	enc_size = req->cryptlen;
	auth_size = req->assoclen + req->cryptlen;

	if (!enc) {
		enc_size -= crypto_aead_authsize(tfm);
		auth_size -= crypto_aead_authsize(tfm);
	}

	if (auth_size > SA_MAX_DATA_SZ ||
	    (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
	     auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
		struct aead_request *subreq = aead_request_ctx(req);
		int ret;

		aead_request_set_tfm(subreq, ctx->fallback.aead);
		aead_request_set_callback(subreq, req->base.flags,
					  req->base.complete, req->base.data);
		aead_request_set_crypt(subreq, req->src, req->dst,
				       req->cryptlen, req->iv);
		aead_request_set_ad(subreq, req->assoclen);

		ret = enc ? crypto_aead_encrypt(subreq) :
			crypto_aead_decrypt(subreq);
		return ret;
	}

	sa_req.enc_offset = req->assoclen;
	sa_req.enc_size = enc_size;
	sa_req.auth_size = auth_size;
	sa_req.size = auth_size;
	sa_req.enc_iv = iv;
	sa_req.type = CRYPTO_ALG_TYPE_AEAD;
	sa_req.enc = enc;
	sa_req.callback = sa_aead_dma_in_callback;
	sa_req.mdata_size = 52;
	sa_req.base = &req->base;
	sa_req.ctx = ctx;
	sa_req.src = req->src;
	sa_req.dst = req->dst;

	return sa_run(&sa_req);
}

/* AEAD algorithm encrypt interface function */
static int sa_aead_encrypt(struct aead_request *req)
{
	return sa_aead_run(req, req->iv, 1);
}

/* AEAD algorithm decrypt interface function */
static int sa_aead_decrypt(struct aead_request *req)
{
	return sa_aead_run(req, req->iv, 0);
}

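/* Algorithm templates advertised to the crypto API, indexed by enum sa_algo_id */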
static struct sa_alg_tmpl sa_algs[] = {
	[SA_ALG_CBC_AES] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(aes)",
			.base.cra_driver_name = "cbc-aes-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = sa_aes_cbc_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_EBC_AES] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(aes)",
			.base.cra_driver_name = "ecb-aes-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = AES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = sa_aes_ecb_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_CBC_DES3] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "cbc(des3_ede)",
			.base.cra_driver_name = "cbc-des3-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = 3 * DES_KEY_SIZE,
			.max_keysize = 3 * DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = sa_3des_cbc_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_ECB_DES3] = {
		.type = CRYPTO_ALG_TYPE_SKCIPHER,
		.alg.skcipher = {
			.base.cra_name = "ecb(des3_ede)",
			.base.cra_driver_name = "ecb-des3-sa2ul",
			.base.cra_priority = 30000,
			.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
					  CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK,
			.base.cra_blocksize = DES_BLOCK_SIZE,
			.base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
			.base.cra_module = THIS_MODULE,
			.init = sa_cipher_cra_init,
			.exit = sa_cipher_cra_exit,
			.min_keysize = 3 * DES_KEY_SIZE,
			.max_keysize = 3 * DES_KEY_SIZE,
			.setkey = sa_3des_ecb_setkey,
			.encrypt = sa_encrypt,
			.decrypt = sa_decrypt,
		}
	},
	[SA_ALG_SHA1] = {
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha1_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha1_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	[SA_ALG_SHA256] = {
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha256_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha256_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	[SA_ALG_SHA512] = {
		.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.ahash = {
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-sa2ul",
				.cra_priority = 400,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_init = sa_sha512_cra_init,
				.cra_exit = sa_sha_cra_exit,
			},
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct sa_sha_req_ctx) +
					  sizeof(struct sha512_state),
			.init = sa_sha_init,
			.update = sa_sha_update,
			.final = sa_sha_final,
			.finup = sa_sha_finup,
			.digest = sa_sha_digest,
			.export = sa_sha_export,
			.import = sa_sha_import,
		},
	},
	[SA_ALG_AUTHENC_SHA1_AES] = {
		.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha1),cbc(aes))-sa2ul",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,

			.init = sa_cra_init_aead_sha1,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_cbc_sha1_setkey,
			.setauthsize = sa_aead_setauthsize,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		},
	},
	[SA_ALG_AUTHENC_SHA256_AES] = {
		.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc(hmac(sha256),cbc(aes))-sa2ul",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
					     CRYPTO_ALG_KERN_DRIVER_ONLY |
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK,
				.cra_ctxsize = sizeof(struct sa_tfm_ctx),
				.cra_module = THIS_MODULE,
				.cra_alignmask = 0,
				.cra_priority = 3000,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,

			.init = sa_cra_init_aead_sha256,
			.exit = sa_exit_tfm_aead,
			.setkey = sa_aead_cbc_sha256_setkey,
			.setauthsize = sa_aead_setauthsize,
			.encrypt = sa_aead_encrypt,
			.decrypt = sa_aead_decrypt,
		},
	},
};

/* Register the supported algorithms with the crypto framework */
static void sa_register_algos(struct sa_crypto_data *dev_data)
{
	const struct sa_match_data *match_data = dev_data->match_data;
	struct device *dev = dev_data->dev;
	char *alg_name;
	u32 type;
	int i, err;

	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
		/* Skip unsupported algos */
		if (!(match_data->supported_algos & BIT(i)))
			continue;

		type = sa_algs[i].type;
		if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
			alg_name = sa_algs[i].alg.skcipher.base.cra_name;
			err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
		} else if (type == CRYPTO_ALG_TYPE_AHASH) {
			alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
			err = crypto_register_ahash(&sa_algs[i].alg.ahash);
		} else if (type == CRYPTO_ALG_TYPE_AEAD) {
			alg_name = sa_algs[i].alg.aead.base.cra_name;
			err = crypto_register_aead(&sa_algs[i].alg.aead);
		} else {
			dev_err(dev,
				"unsupported crypto algorithm (%d)\n",
				sa_algs[i].type);
			continue;
		}

		if (err)
			dev_err(dev, "Failed to register '%s'\n", alg_name);
		else
			sa_algs[i].registered = true;
	}
}

/* Unregister the algorithms from the crypto framework */
static void sa_unregister_algos(const struct device *dev)
{
	u32 type;
	int i;

	for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
		type = sa_algs[i].type;
		if (!sa_algs[i].registered)
			continue;
		if (type == CRYPTO_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
		else if (type == CRYPTO_ALG_TYPE_AHASH)
			crypto_unregister_ahash(&sa_algs[i].alg.ahash);
		else if (type == CRYPTO_ALG_TYPE_AEAD)
			crypto_unregister_aead(&sa_algs[i].alg.aead);

		sa_algs[i].registered = false;
	}
}

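/* Create the DMA pool used to allocate security context buffers */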
static int sa_init_mem(struct sa_crypto_data *dev_data)
{
	struct device *dev = &dev_data->pdev->dev;
	/* Setup dma pool for security context buffers */
	dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
					    SA_CTX_MAX_SZ, 64, 0);
	if (!dev_data->sc_pool) {
		dev_err(dev, "Failed to create dma pool\n");
		return -ENOMEM;
	}

	return 0;
}

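/* Acquire the rx1/rx2/tx DMA channels and apply a common slave configuration */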
static int sa_dma_init(struct sa_crypto_data *dd)
{
	int ret;
	struct dma_slave_config cfg;

	dd->dma_rx1 = NULL;
	dd->dma_tx = NULL;
	dd->dma_rx2 = NULL;

	ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
	if (ret)
		return ret;

	dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
	if (IS_ERR(dd->dma_rx1))
		return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
				     "Unable to request rx1 DMA channel\n");

	dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
	if (IS_ERR(dd->dma_rx2)) {
		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
				    "Unable to request rx2 DMA channel\n");
		goto err_dma_rx2;
	}

	dd->dma_tx = dma_request_chan(dd->dev, "tx");
	if (IS_ERR(dd->dma_tx)) {
		ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
				    "Unable to request tx DMA channel\n");
		goto err_dma_tx;
	}

	memzero_explicit(&cfg, sizeof(cfg));

	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = 4;
	cfg.dst_maxburst = 4;

	ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	ret = dmaengine_slave_config(dd->dma_tx, &cfg);
	if (ret) {
		dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
			ret);
		goto err_dma_config;
	}

	return 0;

err_dma_config:
	dma_release_channel(dd->dma_tx);
err_dma_tx:
	dma_release_channel(dd->dma_rx2);
err_dma_rx2:
	dma_release_channel(dd->dma_rx1);

	return ret;
}

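/* device_for_each_child() callback: make each child device a consumer of the parent */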
static int sa_link_child(struct device *dev, void *data)
{
	struct device *parent = data;

	device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);

	return 0;
}

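/* Per-SoC data: privilege settings and the set of algorithms offloaded to the hardware */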
static struct sa_match_data am654_match_data = {
	.priv = 1,
	.priv_id = 1,
	.supported_algos = BIT(SA_ALG_CBC_AES) |
			   BIT(SA_ALG_EBC_AES) |
			   BIT(SA_ALG_CBC_DES3) |
			   BIT(SA_ALG_ECB_DES3) |
			   BIT(SA_ALG_SHA1) |
			   BIT(SA_ALG_SHA256) |
			   BIT(SA_ALG_SHA512) |
			   BIT(SA_ALG_AUTHENC_SHA1_AES) |
			   BIT(SA_ALG_AUTHENC_SHA256_AES),
};

static struct sa_match_data am64_match_data = {
	.priv = 0,
	.priv_id = 0,
	.supported_algos = BIT(SA_ALG_CBC_AES) |
			   BIT(SA_ALG_EBC_AES) |
			   BIT(SA_ALG_SHA256) |
			   BIT(SA_ALG_SHA512) |
			   BIT(SA_ALG_AUTHENC_SHA256_AES),
};

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
	{ .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
	{ .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
	{ .compatible = "ti,am62-sa3ul", .data = &am64_match_data, },
	{},
};
MODULE_DEVICE_TABLE(of, of_match);

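/*
 * Probe: map the SA2UL registers, set up runtime PM, the security context
 * pool and DMA channels, enable the engines and register the algorithms.
 */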
static int sa_ul_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	void __iomem *saul_base;
	struct sa_crypto_data *dev_data;
	u32 status, val;
	int ret;

	dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
	if (!dev_data)
		return -ENOMEM;

	dev_data->match_data = of_device_get_match_data(dev);
	if (!dev_data->match_data)
		return -ENODEV;

	saul_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(saul_base))
		return PTR_ERR(saul_base);

	sa_k3_dev = dev;
	dev_data->dev = dev;
	dev_data->pdev = pdev;
	dev_data->base = saul_base;
	platform_set_drvdata(pdev, dev_data);
	dev_set_drvdata(sa_k3_dev, dev_data);

	pm_runtime_enable(dev);
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0) {
		dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
		pm_runtime_disable(dev);
		return ret;
	}

	ret = sa_init_mem(dev_data);
	if (ret)
		goto destroy_dma_pool;

	ret = sa_dma_init(dev_data);
	if (ret)
		goto destroy_dma_pool;

	spin_lock_init(&dev_data->scid_lock);

	val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
	      SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
	      SA_EEC_TRNG_EN;
	status = readl_relaxed(saul_base + SA_ENGINE_STATUS);
	/* Enable the engines only if they are not all enabled already */
	if (val & ~status)
		writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);

	sa_register_algos(dev_data);

	ret = of_platform_populate(node, NULL, NULL, dev);
	if (ret)
		goto release_dma;

	device_for_each_child(dev, dev, sa_link_child);

	return 0;

release_dma:
	sa_unregister_algos(dev);

	dma_release_channel(dev_data->dma_rx2);
	dma_release_channel(dev_data->dma_rx1);
	dma_release_channel(dev_data->dma_tx);

destroy_dma_pool:
	dma_pool_destroy(dev_data->sc_pool);

	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);

	return ret;
}

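/* Undo everything done in probe, in reverse order */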
static void sa_ul_remove(struct platform_device *pdev)
{
	struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	sa_unregister_algos(&pdev->dev);

	dma_release_channel(dev_data->dma_rx2);
	dma_release_channel(dev_data->dma_rx1);
	dma_release_channel(dev_data->dma_tx);

	dma_pool_destroy(dev_data->sc_pool);

	platform_set_drvdata(pdev, NULL);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
}

static struct platform_driver sa_ul_driver = {
	.probe = sa_ul_probe,
	.remove_new = sa_ul_remove,
	.driver = {
		.name = "saul-crypto",
		.of_match_table = of_match,
	},
};
module_platform_driver(sa_ul_driver);
MODULE_LICENSE("GPL v2");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * K3 SA2UL crypto accelerator driver
4 *
5 * Copyright (C) 2018-2020 Texas Instruments Incorporated - http://www.ti.com
6 *
7 * Authors: Keerthy
8 * Vitaly Andrianov
9 * Tero Kristo
10 */
11#include <linux/bitfield.h>
12#include <linux/clk.h>
13#include <linux/dma-mapping.h>
14#include <linux/dmaengine.h>
15#include <linux/dmapool.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/of_device.h>
19#include <linux/platform_device.h>
20#include <linux/pm_runtime.h>
21
22#include <crypto/aes.h>
23#include <crypto/authenc.h>
24#include <crypto/des.h>
25#include <crypto/internal/aead.h>
26#include <crypto/internal/hash.h>
27#include <crypto/internal/skcipher.h>
28#include <crypto/scatterwalk.h>
29#include <crypto/sha1.h>
30#include <crypto/sha2.h>
31
32#include "sa2ul.h"
33
34/* Byte offset for key in encryption security context */
35#define SC_ENC_KEY_OFFSET (1 + 27 + 4)
36/* Byte offset for Aux-1 in encryption security context */
37#define SC_ENC_AUX1_OFFSET (1 + 27 + 4 + 32)
38
39#define SA_CMDL_UPD_ENC 0x0001
40#define SA_CMDL_UPD_AUTH 0x0002
41#define SA_CMDL_UPD_ENC_IV 0x0004
42#define SA_CMDL_UPD_AUTH_IV 0x0008
43#define SA_CMDL_UPD_AUX_KEY 0x0010
44
45#define SA_AUTH_SUBKEY_LEN 16
46#define SA_CMDL_PAYLOAD_LENGTH_MASK 0xFFFF
47#define SA_CMDL_SOP_BYPASS_LEN_MASK 0xFF000000
48
49#define MODE_CONTROL_BYTES 27
50#define SA_HASH_PROCESSING 0
51#define SA_CRYPTO_PROCESSING 0
52#define SA_UPLOAD_HASH_TO_TLR BIT(6)
53
54#define SA_SW0_FLAGS_MASK 0xF0000
55#define SA_SW0_CMDL_INFO_MASK 0x1F00000
56#define SA_SW0_CMDL_PRESENT BIT(4)
57#define SA_SW0_ENG_ID_MASK 0x3E000000
58#define SA_SW0_DEST_INFO_PRESENT BIT(30)
59#define SA_SW2_EGRESS_LENGTH 0xFF000000
60#define SA_BASIC_HASH 0x10
61
62#define SHA256_DIGEST_WORDS 8
63/* Make 32-bit word from 4 bytes */
64#define SA_MK_U32(b0, b1, b2, b3) (((b0) << 24) | ((b1) << 16) | \
65 ((b2) << 8) | (b3))
66
67/* size of SCCTL structure in bytes */
68#define SA_SCCTL_SZ 16
69
70/* Max Authentication tag size */
71#define SA_MAX_AUTH_TAG_SZ 64
72
73enum sa_algo_id {
74 SA_ALG_CBC_AES = 0,
75 SA_ALG_EBC_AES,
76 SA_ALG_CBC_DES3,
77 SA_ALG_ECB_DES3,
78 SA_ALG_SHA1,
79 SA_ALG_SHA256,
80 SA_ALG_SHA512,
81 SA_ALG_AUTHENC_SHA1_AES,
82 SA_ALG_AUTHENC_SHA256_AES,
83};
84
85struct sa_match_data {
86 u8 priv;
87 u8 priv_id;
88 u32 supported_algos;
89};
90
91static struct device *sa_k3_dev;
92
93/**
94 * struct sa_cmdl_cfg - Command label configuration descriptor
95 * @aalg: authentication algorithm ID
96 * @enc_eng_id: Encryption Engine ID supported by the SA hardware
97 * @auth_eng_id: Authentication Engine ID
98 * @iv_size: Initialization Vector size
99 * @akey: Authentication key
100 * @akey_len: Authentication key length
101 * @enc: True, if this is an encode request
102 */
103struct sa_cmdl_cfg {
104 int aalg;
105 u8 enc_eng_id;
106 u8 auth_eng_id;
107 u8 iv_size;
108 const u8 *akey;
109 u16 akey_len;
110 bool enc;
111};
112
113/**
114 * struct algo_data - Crypto algorithm specific data
115 * @enc_eng: Encryption engine info structure
116 * @auth_eng: Authentication engine info structure
117 * @auth_ctrl: Authentication control word
118 * @hash_size: Size of digest
119 * @iv_idx: iv index in psdata
120 * @iv_out_size: iv out size
121 * @ealg_id: Encryption Algorithm ID
122 * @aalg_id: Authentication algorithm ID
123 * @mci_enc: Mode Control Instruction for Encryption algorithm
124 * @mci_dec: Mode Control Instruction for Decryption
125 * @inv_key: Whether the encryption algorithm demands key inversion
126 * @ctx: Pointer to the algorithm context
127 * @keyed_mac: Whether the authentication algorithm has key
128 * @prep_iopad: Function pointer to generate intermediate ipad/opad
129 */
130struct algo_data {
131 struct sa_eng_info enc_eng;
132 struct sa_eng_info auth_eng;
133 u8 auth_ctrl;
134 u8 hash_size;
135 u8 iv_idx;
136 u8 iv_out_size;
137 u8 ealg_id;
138 u8 aalg_id;
139 u8 *mci_enc;
140 u8 *mci_dec;
141 bool inv_key;
142 struct sa_tfm_ctx *ctx;
143 bool keyed_mac;
144 void (*prep_iopad)(struct algo_data *algo, const u8 *key,
145 u16 key_sz, __be32 *ipad, __be32 *opad);
146};
147
148/**
149 * struct sa_alg_tmpl: A generic template encompassing crypto/aead algorithms
150 * @type: Type of the crypto algorithm.
151 * @alg: Union of crypto algorithm definitions.
152 * @registered: Flag indicating if the crypto algorithm is already registered
153 */
154struct sa_alg_tmpl {
155 u32 type; /* CRYPTO_ALG_TYPE from <linux/crypto.h> */
156 union {
157 struct skcipher_alg skcipher;
158 struct ahash_alg ahash;
159 struct aead_alg aead;
160 } alg;
161 bool registered;
162};
163
164/**
165 * struct sa_mapped_sg: scatterlist information for tx and rx
166 * @mapped: Set to true if the @sgt is mapped
167 * @dir: mapping direction used for @sgt
168 * @split_sg: Set if the sg is split and needs to be freed up
169 * @static_sg: Static scatterlist entry for overriding data
170 * @sgt: scatterlist table for DMA API use
171 */
172struct sa_mapped_sg {
173 bool mapped;
174 enum dma_data_direction dir;
175 struct scatterlist static_sg;
176 struct scatterlist *split_sg;
177 struct sg_table sgt;
178};
179/**
180 * struct sa_rx_data: RX Packet miscellaneous data place holder
181 * @req: crypto request data pointer
182 * @ddev: pointer to the DMA device
183 * @tx_in: dma_async_tx_descriptor pointer for rx channel
184 * @mapped_sg: Information on tx (0) and rx (1) scatterlist DMA mapping
185 * @enc: Flag indicating either encryption or decryption
186 * @enc_iv_size: Initialisation vector size
187 * @iv_idx: Initialisation vector index
188 */
189struct sa_rx_data {
190 void *req;
191 struct device *ddev;
192 struct dma_async_tx_descriptor *tx_in;
193 struct sa_mapped_sg mapped_sg[2];
194 u8 enc;
195 u8 enc_iv_size;
196 u8 iv_idx;
197};
198
199/**
200 * struct sa_req: SA request definition
201 * @dev: device for the request
202 * @size: total data to the xmitted via DMA
203 * @enc_offset: offset of cipher data
204 * @enc_size: data to be passed to cipher engine
205 * @enc_iv: cipher IV
206 * @auth_offset: offset of the authentication data
207 * @auth_size: size of the authentication data
208 * @auth_iv: authentication IV
209 * @type: algorithm type for the request
210 * @cmdl: command label pointer
211 * @base: pointer to the base request
212 * @ctx: pointer to the algorithm context data
213 * @enc: true if this is an encode request
214 * @src: source data
215 * @dst: destination data
216 * @callback: DMA callback for the request
217 * @mdata_size: metadata size passed to DMA
218 */
219struct sa_req {
220 struct device *dev;
221 u16 size;
222 u8 enc_offset;
223 u16 enc_size;
224 u8 *enc_iv;
225 u8 auth_offset;
226 u16 auth_size;
227 u8 *auth_iv;
228 u32 type;
229 u32 *cmdl;
230 struct crypto_async_request *base;
231 struct sa_tfm_ctx *ctx;
232 bool enc;
233 struct scatterlist *src;
234 struct scatterlist *dst;
235 dma_async_tx_callback callback;
236 u16 mdata_size;
237};
238
239/*
240 * Mode Control Instructions for various Key lengths 128, 192, 256
241 * For CBC (Cipher Block Chaining) mode for encryption
242 */
243static u8 mci_cbc_enc_array[3][MODE_CONTROL_BYTES] = {
244 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
247 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
250 { 0x61, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
253};
254
255/*
256 * Mode Control Instructions for various Key lengths 128, 192, 256
257 * For CBC (Cipher Block Chaining) mode for decryption
258 */
259static u8 mci_cbc_dec_array[3][MODE_CONTROL_BYTES] = {
260 { 0x71, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
263 { 0x71, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
266 { 0x71, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
269};
270
271/*
272 * Mode Control Instructions for various Key lengths 128, 192, 256
273 * For CBC (Cipher Block Chaining) mode for encryption
274 */
275static u8 mci_cbc_enc_no_iv_array[3][MODE_CONTROL_BYTES] = {
276 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x0a, 0xaa, 0x4b, 0x7e, 0x00,
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
278 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
279 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x4a, 0xaa, 0x4b, 0x7e, 0x00,
280 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
282 { 0x21, 0x00, 0x00, 0x18, 0x88, 0x8a, 0xaa, 0x4b, 0x7e, 0x00,
283 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
284 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
285};
286
287/*
288 * Mode Control Instructions for various Key lengths 128, 192, 256
289 * For CBC (Cipher Block Chaining) mode for decryption
290 */
291static u8 mci_cbc_dec_no_iv_array[3][MODE_CONTROL_BYTES] = {
292 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
295 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
298 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0xca, 0x98, 0xf4, 0x40, 0xc0,
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
301};
302
303/*
304 * Mode Control Instructions for various Key lengths 128, 192, 256
305 * For ECB (Electronic Code Book) mode for encryption
306 */
307static u8 mci_ecb_enc_array[3][27] = {
308 { 0x21, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
309 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
310 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
311 { 0x21, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
314 { 0x21, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
315 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
317};
318
319/*
320 * Mode Control Instructions for various Key lengths 128, 192, 256
321 * For ECB (Electronic Code Book) mode for decryption
322 */
323static u8 mci_ecb_dec_array[3][27] = {
324 { 0x31, 0x00, 0x00, 0x80, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
327 { 0x31, 0x00, 0x00, 0x84, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
330 { 0x31, 0x00, 0x00, 0x88, 0x8a, 0x04, 0xb7, 0x90, 0x00, 0x00,
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
333};
334
335/*
336 * Mode Control Instructions for DES algorithm
337 * For CBC (Cipher Block Chaining) mode and ECB mode
338 * encryption and for decryption respectively
339 */
340static u8 mci_cbc_3des_enc_array[MODE_CONTROL_BYTES] = {
341 0x60, 0x00, 0x00, 0x18, 0x88, 0x52, 0xaa, 0x4b, 0x7e, 0x00, 0x00, 0x00,
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
343 0x00, 0x00, 0x00,
344};
345
346static u8 mci_cbc_3des_dec_array[MODE_CONTROL_BYTES] = {
347 0x70, 0x00, 0x00, 0x85, 0x0a, 0xca, 0x98, 0xf4, 0x40, 0xc0, 0x00, 0x00,
348 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
349 0x00, 0x00, 0x00,
350};
351
352static u8 mci_ecb_3des_enc_array[MODE_CONTROL_BYTES] = {
353 0x20, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
355 0x00, 0x00, 0x00,
356};
357
358static u8 mci_ecb_3des_dec_array[MODE_CONTROL_BYTES] = {
359 0x30, 0x00, 0x00, 0x85, 0x0a, 0x04, 0xb7, 0x90, 0x00, 0x00, 0x00, 0x00,
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
361 0x00, 0x00, 0x00,
362};
363
364/*
365 * Perform 16 byte or 128 bit swizzling
366 * The SA2UL Expects the security context to
367 * be in little Endian and the bus width is 128 bits or 16 bytes
368 * Hence swap 16 bytes at a time from higher to lower address
369 */
370static void sa_swiz_128(u8 *in, u16 len)
371{
372 u8 data[16];
373 int i, j;
374
375 for (i = 0; i < len; i += 16) {
376 memcpy(data, &in[i], 16);
377 for (j = 0; j < 16; j++)
378 in[i + j] = data[15 - j];
379 }
380}
381
382/* Prepare the ipad and opad from key as per SHA algorithm step 1*/
383static void prepare_kipad(u8 *k_ipad, const u8 *key, u16 key_sz)
384{
385 int i;
386
387 for (i = 0; i < key_sz; i++)
388 k_ipad[i] = key[i] ^ 0x36;
389
390 /* Instead of XOR with 0 */
391 for (; i < SHA1_BLOCK_SIZE; i++)
392 k_ipad[i] = 0x36;
393}
394
395static void prepare_kopad(u8 *k_opad, const u8 *key, u16 key_sz)
396{
397 int i;
398
399 for (i = 0; i < key_sz; i++)
400 k_opad[i] = key[i] ^ 0x5c;
401
402 /* Instead of XOR with 0 */
403 for (; i < SHA1_BLOCK_SIZE; i++)
404 k_opad[i] = 0x5c;
405}
406
407static void sa_export_shash(void *state, struct shash_desc *hash,
408 int digest_size, __be32 *out)
409{
410 struct sha1_state *sha1;
411 struct sha256_state *sha256;
412 u32 *result;
413
414 switch (digest_size) {
415 case SHA1_DIGEST_SIZE:
416 sha1 = state;
417 result = sha1->state;
418 break;
419 case SHA256_DIGEST_SIZE:
420 sha256 = state;
421 result = sha256->state;
422 break;
423 default:
424 dev_err(sa_k3_dev, "%s: bad digest_size=%d\n", __func__,
425 digest_size);
426 return;
427 }
428
429 crypto_shash_export(hash, state);
430
431 cpu_to_be32_array(out, result, digest_size / 4);
432}
433
434static void sa_prepare_iopads(struct algo_data *data, const u8 *key,
435 u16 key_sz, __be32 *ipad, __be32 *opad)
436{
437 SHASH_DESC_ON_STACK(shash, data->ctx->shash);
438 int block_size = crypto_shash_blocksize(data->ctx->shash);
439 int digest_size = crypto_shash_digestsize(data->ctx->shash);
440 union {
441 struct sha1_state sha1;
442 struct sha256_state sha256;
443 u8 k_pad[SHA1_BLOCK_SIZE];
444 } sha;
445
446 shash->tfm = data->ctx->shash;
447
448 prepare_kipad(sha.k_pad, key, key_sz);
449
450 crypto_shash_init(shash);
451 crypto_shash_update(shash, sha.k_pad, block_size);
452 sa_export_shash(&sha, shash, digest_size, ipad);
453
454 prepare_kopad(sha.k_pad, key, key_sz);
455
456 crypto_shash_init(shash);
457 crypto_shash_update(shash, sha.k_pad, block_size);
458
459 sa_export_shash(&sha, shash, digest_size, opad);
460
461 memzero_explicit(&sha, sizeof(sha));
462}
463
464/* Derive the inverse key used in AES-CBC decryption operation */
465static inline int sa_aes_inv_key(u8 *inv_key, const u8 *key, u16 key_sz)
466{
467 struct crypto_aes_ctx ctx;
468 int key_pos;
469
470 if (aes_expandkey(&ctx, key, key_sz)) {
471 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
472 return -EINVAL;
473 }
474
475 /* work around to get the right inverse for AES_KEYSIZE_192 size keys */
476 if (key_sz == AES_KEYSIZE_192) {
477 ctx.key_enc[52] = ctx.key_enc[51] ^ ctx.key_enc[46];
478 ctx.key_enc[53] = ctx.key_enc[52] ^ ctx.key_enc[47];
479 }
480
481 /* Based crypto_aes_expand_key logic */
482 switch (key_sz) {
483 case AES_KEYSIZE_128:
484 case AES_KEYSIZE_192:
485 key_pos = key_sz + 24;
486 break;
487
488 case AES_KEYSIZE_256:
489 key_pos = key_sz + 24 - 4;
490 break;
491
492 default:
493 dev_err(sa_k3_dev, "%s: bad key len(%d)\n", __func__, key_sz);
494 return -EINVAL;
495 }
496
497 memcpy(inv_key, &ctx.key_enc[key_pos], key_sz);
498 return 0;
499}
500
501/* Set Security context for the encryption engine */
502static int sa_set_sc_enc(struct algo_data *ad, const u8 *key, u16 key_sz,
503 u8 enc, u8 *sc_buf)
504{
505 const u8 *mci = NULL;
506
507 /* Set Encryption mode selector to crypto processing */
508 sc_buf[0] = SA_CRYPTO_PROCESSING;
509
510 if (enc)
511 mci = ad->mci_enc;
512 else
513 mci = ad->mci_dec;
514 /* Set the mode control instructions in security context */
515 if (mci)
516 memcpy(&sc_buf[1], mci, MODE_CONTROL_BYTES);
517
518 /* For AES-CBC decryption get the inverse key */
519 if (ad->inv_key && !enc) {
520 if (sa_aes_inv_key(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz))
521 return -EINVAL;
522 /* For all other cases: key is used */
523 } else {
524 memcpy(&sc_buf[SC_ENC_KEY_OFFSET], key, key_sz);
525 }
526
527 return 0;
528}
529
530/* Set Security context for the authentication engine */
531static void sa_set_sc_auth(struct algo_data *ad, const u8 *key, u16 key_sz,
532 u8 *sc_buf)
533{
534 __be32 *ipad = (void *)(sc_buf + 32);
535 __be32 *opad = (void *)(sc_buf + 64);
536
537 /* Set Authentication mode selector to hash processing */
538 sc_buf[0] = SA_HASH_PROCESSING;
539 /* Auth SW ctrl word: bit[6]=1 (upload computed hash to TLR section) */
540 sc_buf[1] = SA_UPLOAD_HASH_TO_TLR;
541 sc_buf[1] |= ad->auth_ctrl;
542
543 /* Copy the keys or ipad/opad */
544 if (ad->keyed_mac)
545 ad->prep_iopad(ad, key, key_sz, ipad, opad);
546 else {
547 /* basic hash */
548 sc_buf[1] |= SA_BASIC_HASH;
549 }
550}
551
552static inline void sa_copy_iv(__be32 *out, const u8 *iv, bool size16)
553{
554 int j;
555
556 for (j = 0; j < ((size16) ? 4 : 2); j++) {
557 *out = cpu_to_be32(*((u32 *)iv));
558 iv += 4;
559 out++;
560 }
561}
562
563/* Format general command label */
564static int sa_format_cmdl_gen(struct sa_cmdl_cfg *cfg, u8 *cmdl,
565 struct sa_cmdl_upd_info *upd_info)
566{
567 u8 enc_offset = 0, auth_offset = 0, total = 0;
568 u8 enc_next_eng = SA_ENG_ID_OUTPORT2;
569 u8 auth_next_eng = SA_ENG_ID_OUTPORT2;
570 u32 *word_ptr = (u32 *)cmdl;
571 int i;
572
573 /* Clear the command label */
574 memzero_explicit(cmdl, (SA_MAX_CMDL_WORDS * sizeof(u32)));
575
576 /* Iniialize the command update structure */
577 memzero_explicit(upd_info, sizeof(*upd_info));
578
579 if (cfg->enc_eng_id && cfg->auth_eng_id) {
580 if (cfg->enc) {
581 auth_offset = SA_CMDL_HEADER_SIZE_BYTES;
582 enc_next_eng = cfg->auth_eng_id;
583
584 if (cfg->iv_size)
585 auth_offset += cfg->iv_size;
586 } else {
587 enc_offset = SA_CMDL_HEADER_SIZE_BYTES;
588 auth_next_eng = cfg->enc_eng_id;
589 }
590 }
591
592 if (cfg->enc_eng_id) {
593 upd_info->flags |= SA_CMDL_UPD_ENC;
594 upd_info->enc_size.index = enc_offset >> 2;
595 upd_info->enc_offset.index = upd_info->enc_size.index + 1;
596 /* Encryption command label */
597 cmdl[enc_offset + SA_CMDL_OFFSET_NESC] = enc_next_eng;
598
599 /* Encryption modes requiring IV */
600 if (cfg->iv_size) {
601 upd_info->flags |= SA_CMDL_UPD_ENC_IV;
602 upd_info->enc_iv.index =
603 (enc_offset + SA_CMDL_HEADER_SIZE_BYTES) >> 2;
604 upd_info->enc_iv.size = cfg->iv_size;
605
606 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
607 SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
608
609 cmdl[enc_offset + SA_CMDL_OFFSET_OPTION_CTRL1] =
610 (SA_CTX_ENC_AUX2_OFFSET | (cfg->iv_size >> 3));
611 total += SA_CMDL_HEADER_SIZE_BYTES + cfg->iv_size;
612 } else {
613 cmdl[enc_offset + SA_CMDL_OFFSET_LABEL_LEN] =
614 SA_CMDL_HEADER_SIZE_BYTES;
615 total += SA_CMDL_HEADER_SIZE_BYTES;
616 }
617 }
618
619 if (cfg->auth_eng_id) {
620 upd_info->flags |= SA_CMDL_UPD_AUTH;
621 upd_info->auth_size.index = auth_offset >> 2;
622 upd_info->auth_offset.index = upd_info->auth_size.index + 1;
623 cmdl[auth_offset + SA_CMDL_OFFSET_NESC] = auth_next_eng;
624 cmdl[auth_offset + SA_CMDL_OFFSET_LABEL_LEN] =
625 SA_CMDL_HEADER_SIZE_BYTES;
626 total += SA_CMDL_HEADER_SIZE_BYTES;
627 }
628
629 total = roundup(total, 8);
630
631 for (i = 0; i < total / 4; i++)
632 word_ptr[i] = swab32(word_ptr[i]);
633
634 return total;
635}
636
637/* Update Command label */
638static inline void sa_update_cmdl(struct sa_req *req, u32 *cmdl,
639 struct sa_cmdl_upd_info *upd_info)
640{
641 int i = 0, j;
642
643 if (likely(upd_info->flags & SA_CMDL_UPD_ENC)) {
644 cmdl[upd_info->enc_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
645 cmdl[upd_info->enc_size.index] |= req->enc_size;
646 cmdl[upd_info->enc_offset.index] &=
647 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
648 cmdl[upd_info->enc_offset.index] |=
649 FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
650 req->enc_offset);
651
652 if (likely(upd_info->flags & SA_CMDL_UPD_ENC_IV)) {
653 __be32 *data = (__be32 *)&cmdl[upd_info->enc_iv.index];
654 u32 *enc_iv = (u32 *)req->enc_iv;
655
656 for (j = 0; i < upd_info->enc_iv.size; i += 4, j++) {
657 data[j] = cpu_to_be32(*enc_iv);
658 enc_iv++;
659 }
660 }
661 }
662
663 if (likely(upd_info->flags & SA_CMDL_UPD_AUTH)) {
664 cmdl[upd_info->auth_size.index] &= ~SA_CMDL_PAYLOAD_LENGTH_MASK;
665 cmdl[upd_info->auth_size.index] |= req->auth_size;
666 cmdl[upd_info->auth_offset.index] &=
667 ~SA_CMDL_SOP_BYPASS_LEN_MASK;
668 cmdl[upd_info->auth_offset.index] |=
669 FIELD_PREP(SA_CMDL_SOP_BYPASS_LEN_MASK,
670 req->auth_offset);
671 if (upd_info->flags & SA_CMDL_UPD_AUTH_IV) {
672 sa_copy_iv((void *)&cmdl[upd_info->auth_iv.index],
673 req->auth_iv,
674 (upd_info->auth_iv.size > 8));
675 }
676 if (upd_info->flags & SA_CMDL_UPD_AUX_KEY) {
677 int offset = (req->auth_size & 0xF) ? 4 : 0;
678
679 memcpy(&cmdl[upd_info->aux_key_info.index],
680 &upd_info->aux_key[offset], 16);
681 }
682 }
683}
684
685/* Format SWINFO words to be sent to SA */
686static
687void sa_set_swinfo(u8 eng_id, u16 sc_id, dma_addr_t sc_phys,
688 u8 cmdl_present, u8 cmdl_offset, u8 flags,
689 u8 hash_size, u32 *swinfo)
690{
691 swinfo[0] = sc_id;
692 swinfo[0] |= FIELD_PREP(SA_SW0_FLAGS_MASK, flags);
693 if (likely(cmdl_present))
694 swinfo[0] |= FIELD_PREP(SA_SW0_CMDL_INFO_MASK,
695 cmdl_offset | SA_SW0_CMDL_PRESENT);
696 swinfo[0] |= FIELD_PREP(SA_SW0_ENG_ID_MASK, eng_id);
697
698 swinfo[0] |= SA_SW0_DEST_INFO_PRESENT;
699 swinfo[1] = (u32)(sc_phys & 0xFFFFFFFFULL);
700 swinfo[2] = (u32)((sc_phys & 0xFFFFFFFF00000000ULL) >> 32);
701 swinfo[2] |= FIELD_PREP(SA_SW2_EGRESS_LENGTH, hash_size);
702}
703
704/* Dump the security context */
705static void sa_dump_sc(u8 *buf, dma_addr_t dma_addr)
706{
707#ifdef DEBUG
708 dev_info(sa_k3_dev, "Security context dump:: 0x%pad\n", &dma_addr);
709 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
710 16, 1, buf, SA_CTX_MAX_SZ, false);
711#endif
712}
713
714static
715int sa_init_sc(struct sa_ctx_info *ctx, const struct sa_match_data *match_data,
716 const u8 *enc_key, u16 enc_key_sz,
717 const u8 *auth_key, u16 auth_key_sz,
718 struct algo_data *ad, u8 enc, u32 *swinfo)
719{
720 int enc_sc_offset = 0;
721 int auth_sc_offset = 0;
722 u8 *sc_buf = ctx->sc;
723 u16 sc_id = ctx->sc_id;
724 u8 first_engine = 0;
725
726 memzero_explicit(sc_buf, SA_CTX_MAX_SZ);
727
728 if (ad->auth_eng.eng_id) {
729 if (enc)
730 first_engine = ad->enc_eng.eng_id;
731 else
732 first_engine = ad->auth_eng.eng_id;
733
734 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
735 auth_sc_offset = enc_sc_offset + ad->enc_eng.sc_size;
736 sc_buf[1] = SA_SCCTL_FE_AUTH_ENC;
737 if (!ad->hash_size)
738 return -EINVAL;
739 ad->hash_size = roundup(ad->hash_size, 8);
740
741 } else if (ad->enc_eng.eng_id && !ad->auth_eng.eng_id) {
742 enc_sc_offset = SA_CTX_PHP_PE_CTX_SZ;
743 first_engine = ad->enc_eng.eng_id;
744 sc_buf[1] = SA_SCCTL_FE_ENC;
745 ad->hash_size = ad->iv_out_size;
746 }
747
748 /* SCCTL Owner info: 0=host, 1=CP_ACE */
749 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0;
750 memcpy(&sc_buf[2], &sc_id, 2);
751 sc_buf[4] = 0x0;
752 sc_buf[5] = match_data->priv_id;
753 sc_buf[6] = match_data->priv;
754 sc_buf[7] = 0x0;
755
756 /* Prepare context for encryption engine */
757 if (ad->enc_eng.sc_size) {
758 if (sa_set_sc_enc(ad, enc_key, enc_key_sz, enc,
759 &sc_buf[enc_sc_offset]))
760 return -EINVAL;
761 }
762
763 /* Prepare context for authentication engine */
764 if (ad->auth_eng.sc_size)
765 sa_set_sc_auth(ad, auth_key, auth_key_sz,
766 &sc_buf[auth_sc_offset]);
767
768 /* Set the ownership of context to CP_ACE */
769 sc_buf[SA_CTX_SCCTL_OWNER_OFFSET] = 0x80;
770
771 /* swizzle the security context */
772 sa_swiz_128(sc_buf, SA_CTX_MAX_SZ);
773
774 sa_set_swinfo(first_engine, ctx->sc_id, ctx->sc_phys, 1, 0,
775 SA_SW_INFO_FLAG_EVICT, ad->hash_size, swinfo);
776
777 sa_dump_sc(sc_buf, ctx->sc_phys);
778
779 return 0;
780}
781
782/* Free the per direction context memory */
783static void sa_free_ctx_info(struct sa_ctx_info *ctx,
784 struct sa_crypto_data *data)
785{
786 unsigned long bn;
787
788 bn = ctx->sc_id - data->sc_id_start;
789 spin_lock(&data->scid_lock);
790 __clear_bit(bn, data->ctx_bm);
791 data->sc_id--;
792 spin_unlock(&data->scid_lock);
793
794 if (ctx->sc) {
795 dma_pool_free(data->sc_pool, ctx->sc, ctx->sc_phys);
796 ctx->sc = NULL;
797 }
798}
799
800static int sa_init_ctx_info(struct sa_ctx_info *ctx,
801 struct sa_crypto_data *data)
802{
803 unsigned long bn;
804 int err;
805
806 spin_lock(&data->scid_lock);
807 bn = find_first_zero_bit(data->ctx_bm, SA_MAX_NUM_CTX);
808 __set_bit(bn, data->ctx_bm);
809 data->sc_id++;
810 spin_unlock(&data->scid_lock);
811
812 ctx->sc_id = (u16)(data->sc_id_start + bn);
813
814 ctx->sc = dma_pool_alloc(data->sc_pool, GFP_KERNEL, &ctx->sc_phys);
815 if (!ctx->sc) {
816 dev_err(&data->pdev->dev, "Failed to allocate SC memory\n");
817 err = -ENOMEM;
818 goto scid_rollback;
819 }
820
821 return 0;
822
823scid_rollback:
824 spin_lock(&data->scid_lock);
825 __clear_bit(bn, data->ctx_bm);
826 data->sc_id--;
827 spin_unlock(&data->scid_lock);
828
829 return err;
830}
831
832static void sa_cipher_cra_exit(struct crypto_skcipher *tfm)
833{
834 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
835 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
836
837 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
838 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
839 ctx->dec.sc_id, &ctx->dec.sc_phys);
840
841 sa_free_ctx_info(&ctx->enc, data);
842 sa_free_ctx_info(&ctx->dec, data);
843
844 crypto_free_skcipher(ctx->fallback.skcipher);
845}
846
847static int sa_cipher_cra_init(struct crypto_skcipher *tfm)
848{
849 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
850 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
851 const char *name = crypto_tfm_alg_name(&tfm->base);
852 struct crypto_skcipher *child;
853 int ret;
854
855 memzero_explicit(ctx, sizeof(*ctx));
856 ctx->dev_data = data;
857
858 ret = sa_init_ctx_info(&ctx->enc, data);
859 if (ret)
860 return ret;
861 ret = sa_init_ctx_info(&ctx->dec, data);
862 if (ret) {
863 sa_free_ctx_info(&ctx->enc, data);
864 return ret;
865 }
866
867 child = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
868
869 if (IS_ERR(child)) {
870 dev_err(sa_k3_dev, "Error allocating fallback algo %s\n", name);
871 return PTR_ERR(child);
872 }
873
874 ctx->fallback.skcipher = child;
875 crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
876 sizeof(struct skcipher_request));
877
878 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
879 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
880 ctx->dec.sc_id, &ctx->dec.sc_phys);
881 return 0;
882}
883
884static int sa_cipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
885 unsigned int keylen, struct algo_data *ad)
886{
887 struct sa_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
888 struct crypto_skcipher *child = ctx->fallback.skcipher;
889 int cmdl_len;
890 struct sa_cmdl_cfg cfg;
891 int ret;
892
893 if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
894 keylen != AES_KEYSIZE_256)
895 return -EINVAL;
896
897 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
898 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
899
900 memzero_explicit(&cfg, sizeof(cfg));
901 cfg.enc_eng_id = ad->enc_eng.eng_id;
902 cfg.iv_size = crypto_skcipher_ivsize(tfm);
903
904 crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
905 crypto_skcipher_set_flags(child, tfm->base.crt_flags &
906 CRYPTO_TFM_REQ_MASK);
907 ret = crypto_skcipher_setkey(child, key, keylen);
908 if (ret)
909 return ret;
910
911 /* Setup Encryption Security Context & Command label template */
912 if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, key, keylen, NULL, 0,
913 ad, 1, &ctx->enc.epib[1]))
914 goto badkey;
915
916 cmdl_len = sa_format_cmdl_gen(&cfg,
917 (u8 *)ctx->enc.cmdl,
918 &ctx->enc.cmdl_upd_info);
919 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
920 goto badkey;
921
922 ctx->enc.cmdl_size = cmdl_len;
923
924 /* Setup Decryption Security Context & Command label template */
925 if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, key, keylen, NULL, 0,
926 ad, 0, &ctx->dec.epib[1]))
927 goto badkey;
928
929 cfg.enc_eng_id = ad->enc_eng.eng_id;
930 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
931 &ctx->dec.cmdl_upd_info);
932
933 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
934 goto badkey;
935
936 ctx->dec.cmdl_size = cmdl_len;
937 ctx->iv_idx = ad->iv_idx;
938
939 return 0;
940
941badkey:
942 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
943 return -EINVAL;
944}
945
946static int sa_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
947 unsigned int keylen)
948{
949 struct algo_data ad = { 0 };
950 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
951 int key_idx = (keylen >> 3) - 2;
952
953 if (key_idx >= 3)
954 return -EINVAL;
955
956 ad.mci_enc = mci_cbc_enc_array[key_idx];
957 ad.mci_dec = mci_cbc_dec_array[key_idx];
958 ad.inv_key = true;
959 ad.ealg_id = SA_EALG_ID_AES_CBC;
960 ad.iv_idx = 4;
961 ad.iv_out_size = 16;
962
963 return sa_cipher_setkey(tfm, key, keylen, &ad);
964}
965
966static int sa_aes_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
967 unsigned int keylen)
968{
969 struct algo_data ad = { 0 };
970 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
971 int key_idx = (keylen >> 3) - 2;
972
973 if (key_idx >= 3)
974 return -EINVAL;
975
976 ad.mci_enc = mci_ecb_enc_array[key_idx];
977 ad.mci_dec = mci_ecb_dec_array[key_idx];
978 ad.inv_key = true;
979 ad.ealg_id = SA_EALG_ID_AES_ECB;
980
981 return sa_cipher_setkey(tfm, key, keylen, &ad);
982}
983
984static int sa_3des_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key,
985 unsigned int keylen)
986{
987 struct algo_data ad = { 0 };
988
989 ad.mci_enc = mci_cbc_3des_enc_array;
990 ad.mci_dec = mci_cbc_3des_dec_array;
991 ad.ealg_id = SA_EALG_ID_3DES_CBC;
992 ad.iv_idx = 6;
993 ad.iv_out_size = 8;
994
995 return sa_cipher_setkey(tfm, key, keylen, &ad);
996}
997
998static int sa_3des_ecb_setkey(struct crypto_skcipher *tfm, const u8 *key,
999 unsigned int keylen)
1000{
1001 struct algo_data ad = { 0 };
1002
1003 ad.mci_enc = mci_ecb_3des_enc_array;
1004 ad.mci_dec = mci_ecb_3des_dec_array;
1005
1006 return sa_cipher_setkey(tfm, key, keylen, &ad);
1007}
1008
1009static void sa_sync_from_device(struct sa_rx_data *rxd)
1010{
1011 struct sg_table *sgt;
1012
1013 if (rxd->mapped_sg[0].dir == DMA_BIDIRECTIONAL)
1014 sgt = &rxd->mapped_sg[0].sgt;
1015 else
1016 sgt = &rxd->mapped_sg[1].sgt;
1017
1018 dma_sync_sgtable_for_cpu(rxd->ddev, sgt, DMA_FROM_DEVICE);
1019}
1020
1021static void sa_free_sa_rx_data(struct sa_rx_data *rxd)
1022{
1023 int i;
1024
1025 for (i = 0; i < ARRAY_SIZE(rxd->mapped_sg); i++) {
1026 struct sa_mapped_sg *mapped_sg = &rxd->mapped_sg[i];
1027
1028 if (mapped_sg->mapped) {
1029 dma_unmap_sgtable(rxd->ddev, &mapped_sg->sgt,
1030 mapped_sg->dir, 0);
1031 kfree(mapped_sg->split_sg);
1032 }
1033 }
1034
1035 kfree(rxd);
1036}
1037
1038static void sa_aes_dma_in_callback(void *data)
1039{
1040 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1041 struct skcipher_request *req;
1042 u32 *result;
1043 __be32 *mdptr;
1044 size_t ml, pl;
1045 int i;
1046
1047 sa_sync_from_device(rxd);
1048 req = container_of(rxd->req, struct skcipher_request, base);
1049
1050 if (req->iv) {
1051 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl,
1052 &ml);
1053 result = (u32 *)req->iv;
1054
1055 for (i = 0; i < (rxd->enc_iv_size / 4); i++)
1056 result[i] = be32_to_cpu(mdptr[i + rxd->iv_idx]);
1057 }
1058
1059 sa_free_sa_rx_data(rxd);
1060
1061 skcipher_request_complete(req, 0);
1062}
1063
1064static void
1065sa_prepare_tx_desc(u32 *mdptr, u32 pslen, u32 *psdata, u32 epiblen, u32 *epib)
1066{
1067 u32 *out, *in;
1068 int i;
1069
1070 for (out = mdptr, in = epib, i = 0; i < epiblen / sizeof(u32); i++)
1071 *out++ = *in++;
1072
1073 mdptr[4] = (0xFFFF << 16);
1074 for (out = &mdptr[5], in = psdata, i = 0;
1075 i < pslen / sizeof(u32); i++)
1076 *out++ = *in++;
1077}
1078
1079static int sa_run(struct sa_req *req)
1080{
1081 struct sa_rx_data *rxd;
1082 gfp_t gfp_flags;
1083 u32 cmdl[SA_MAX_CMDL_WORDS];
1084 struct sa_crypto_data *pdata = dev_get_drvdata(sa_k3_dev);
1085 struct device *ddev;
1086 struct dma_chan *dma_rx;
1087 int sg_nents, src_nents, dst_nents;
1088 struct scatterlist *src, *dst;
1089 size_t pl, ml, split_size;
1090 struct sa_ctx_info *sa_ctx = req->enc ? &req->ctx->enc : &req->ctx->dec;
1091 int ret;
1092 struct dma_async_tx_descriptor *tx_out;
1093 u32 *mdptr;
1094 bool diff_dst;
1095 enum dma_data_direction dir_src;
1096 struct sa_mapped_sg *mapped_sg;
1097
1098 gfp_flags = req->base->flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
1099 GFP_KERNEL : GFP_ATOMIC;
1100
1101 rxd = kzalloc(sizeof(*rxd), gfp_flags);
1102 if (!rxd)
1103 return -ENOMEM;
1104
1105 if (req->src != req->dst) {
1106 diff_dst = true;
1107 dir_src = DMA_TO_DEVICE;
1108 } else {
1109 diff_dst = false;
1110 dir_src = DMA_BIDIRECTIONAL;
1111 }
1112
1113 /*
1114 * SA2UL has an interesting feature where the receive DMA channel
1115 * is selected based on the data passed to the engine. Within the
1116 * transition range, there is also a space where it is impossible
1117 * to determine where the data will end up, and this should be
1118 * avoided. This will be handled by the SW fallback mechanism by
1119 * the individual algorithm implementations.
1120 */
1121 if (req->size >= 256)
1122 dma_rx = pdata->dma_rx2;
1123 else
1124 dma_rx = pdata->dma_rx1;
1125
1126 ddev = dmaengine_get_dma_device(pdata->dma_tx);
1127 rxd->ddev = ddev;
1128
1129 memcpy(cmdl, sa_ctx->cmdl, sa_ctx->cmdl_size);
1130
1131 sa_update_cmdl(req, cmdl, &sa_ctx->cmdl_upd_info);
1132
1133 if (req->type != CRYPTO_ALG_TYPE_AHASH) {
1134 if (req->enc)
1135 req->type |=
1136 (SA_REQ_SUBTYPE_ENC << SA_REQ_SUBTYPE_SHIFT);
1137 else
1138 req->type |=
1139 (SA_REQ_SUBTYPE_DEC << SA_REQ_SUBTYPE_SHIFT);
1140 }
1141
1142 cmdl[sa_ctx->cmdl_size / sizeof(u32)] = req->type;
1143
1144 /*
1145 * Map the packets, first we check if the data fits into a single
1146 * sg entry and use that if possible. If it does not fit, we check
1147 * if we need to do sg_split to align the scatterlist data on the
1148 * actual data size being processed by the crypto engine.
1149 */
1150 src = req->src;
1151 sg_nents = sg_nents_for_len(src, req->size);
1152
1153 split_size = req->size;
1154
1155 mapped_sg = &rxd->mapped_sg[0];
1156 if (sg_nents == 1 && split_size <= req->src->length) {
1157 src = &mapped_sg->static_sg;
1158 src_nents = 1;
1159 sg_init_table(src, 1);
1160 sg_set_page(src, sg_page(req->src), split_size,
1161 req->src->offset);
1162
1163 mapped_sg->sgt.sgl = src;
1164 mapped_sg->sgt.orig_nents = src_nents;
1165 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1166 if (ret) {
1167 kfree(rxd);
1168 return ret;
1169 }
1170
1171 mapped_sg->dir = dir_src;
1172 mapped_sg->mapped = true;
1173 } else {
1174 mapped_sg->sgt.sgl = req->src;
1175 mapped_sg->sgt.orig_nents = sg_nents;
1176 ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
1177 if (ret) {
1178 kfree(rxd);
1179 return ret;
1180 }
1181
1182 mapped_sg->dir = dir_src;
1183 mapped_sg->mapped = true;
1184
1185 ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents, 0, 1,
1186 &split_size, &src, &src_nents, gfp_flags);
1187 if (ret) {
1188 src_nents = mapped_sg->sgt.nents;
1189 src = mapped_sg->sgt.sgl;
1190 } else {
1191 mapped_sg->split_sg = src;
1192 }
1193 }
1194
1195 dma_sync_sgtable_for_device(ddev, &mapped_sg->sgt, DMA_TO_DEVICE);
1196
1197 if (!diff_dst) {
1198 dst_nents = src_nents;
1199 dst = src;
1200 } else {
1201 dst_nents = sg_nents_for_len(req->dst, req->size);
1202 mapped_sg = &rxd->mapped_sg[1];
1203
1204 if (dst_nents == 1 && split_size <= req->dst->length) {
1205 dst = &mapped_sg->static_sg;
1206 dst_nents = 1;
1207 sg_init_table(dst, 1);
1208 sg_set_page(dst, sg_page(req->dst), split_size,
1209 req->dst->offset);
1210
1211 mapped_sg->sgt.sgl = dst;
1212 mapped_sg->sgt.orig_nents = dst_nents;
1213 ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1214 DMA_FROM_DEVICE, 0);
1215 if (ret)
1216 goto err_cleanup;
1217
1218 mapped_sg->dir = DMA_FROM_DEVICE;
1219 mapped_sg->mapped = true;
1220 } else {
1221 mapped_sg->sgt.sgl = req->dst;
1222 mapped_sg->sgt.orig_nents = dst_nents;
1223 ret = dma_map_sgtable(ddev, &mapped_sg->sgt,
1224 DMA_FROM_DEVICE, 0);
1225 if (ret)
1226 goto err_cleanup;
1227
1228 mapped_sg->dir = DMA_FROM_DEVICE;
1229 mapped_sg->mapped = true;
1230
1231 ret = sg_split(mapped_sg->sgt.sgl, mapped_sg->sgt.nents,
1232 0, 1, &split_size, &dst, &dst_nents,
1233 gfp_flags);
1234 if (ret) {
1235 dst_nents = mapped_sg->sgt.nents;
1236 dst = mapped_sg->sgt.sgl;
1237 } else {
1238 mapped_sg->split_sg = dst;
1239 }
1240 }
1241 }
1242
1243 rxd->tx_in = dmaengine_prep_slave_sg(dma_rx, dst, dst_nents,
1244 DMA_DEV_TO_MEM,
1245 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1246 if (!rxd->tx_in) {
1247 dev_err(pdata->dev, "IN prep_slave_sg() failed\n");
1248 ret = -EINVAL;
1249 goto err_cleanup;
1250 }
1251
1252 rxd->req = (void *)req->base;
1253 rxd->enc = req->enc;
1254 rxd->iv_idx = req->ctx->iv_idx;
1255 rxd->enc_iv_size = sa_ctx->cmdl_upd_info.enc_iv.size;
1256 rxd->tx_in->callback = req->callback;
1257 rxd->tx_in->callback_param = rxd;
1258
1259 tx_out = dmaengine_prep_slave_sg(pdata->dma_tx, src,
1260 src_nents, DMA_MEM_TO_DEV,
1261 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1262
1263 if (!tx_out) {
1264 dev_err(pdata->dev, "OUT prep_slave_sg() failed\n");
1265 ret = -EINVAL;
1266 goto err_cleanup;
1267 }
1268
1269 /*
1270 * Prepare metadata for DMA engine. This essentially describes the
1271 * crypto algorithm to be used, data sizes, different keys etc.
1272 */
1273 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(tx_out, &pl, &ml);
1274
1275 sa_prepare_tx_desc(mdptr, (sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS *
1276 sizeof(u32))), cmdl, sizeof(sa_ctx->epib),
1277 sa_ctx->epib);
1278
1279 ml = sa_ctx->cmdl_size + (SA_PSDATA_CTX_WORDS * sizeof(u32));
1280 dmaengine_desc_set_metadata_len(tx_out, req->mdata_size);
1281
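	/*
	 * Submit both descriptors and kick off the transfers; completion is
	 * signalled through the RX callback set above.
	 */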
1282 dmaengine_submit(tx_out);
1283 dmaengine_submit(rxd->tx_in);
1284
1285 dma_async_issue_pending(dma_rx);
1286 dma_async_issue_pending(pdata->dma_tx);
1287
1288 return -EINPROGRESS;
1289
1290err_cleanup:
1291 sa_free_sa_rx_data(rxd);
1292
1293 return ret;
1294}
1295
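/*
 * Common handler for the skcipher encrypt/decrypt paths: empty requests
 * complete immediately, lengths that are not a multiple of the block size
 * are rejected, sizes the hardware cannot handle go to the software
 * fallback, and everything else is packaged into a struct sa_req for
 * sa_run().
 */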
1296static int sa_cipher_run(struct skcipher_request *req, u8 *iv, int enc)
1297{
1298 struct sa_tfm_ctx *ctx =
1299 crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
1300 struct crypto_alg *alg = req->base.tfm->__crt_alg;
1301 struct sa_req sa_req = { 0 };
1302
1303 if (!req->cryptlen)
1304 return 0;
1305
1306 if (req->cryptlen % alg->cra_blocksize)
1307 return -EINVAL;
1308
1309 /* Use SW fallback if the data size is not supported */
1310 if (req->cryptlen > SA_MAX_DATA_SZ ||
1311 (req->cryptlen >= SA_UNSAFE_DATA_SZ_MIN &&
1312 req->cryptlen <= SA_UNSAFE_DATA_SZ_MAX)) {
1313 struct skcipher_request *subreq = skcipher_request_ctx(req);
1314
1315 skcipher_request_set_tfm(subreq, ctx->fallback.skcipher);
1316 skcipher_request_set_callback(subreq, req->base.flags,
1317 req->base.complete,
1318 req->base.data);
1319 skcipher_request_set_crypt(subreq, req->src, req->dst,
1320 req->cryptlen, req->iv);
1321 if (enc)
1322 return crypto_skcipher_encrypt(subreq);
1323 else
1324 return crypto_skcipher_decrypt(subreq);
1325 }
1326
1327 sa_req.size = req->cryptlen;
1328 sa_req.enc_size = req->cryptlen;
1329 sa_req.src = req->src;
1330 sa_req.dst = req->dst;
1331 sa_req.enc_iv = iv;
1332 sa_req.type = CRYPTO_ALG_TYPE_SKCIPHER;
1333 sa_req.enc = enc;
1334 sa_req.callback = sa_aes_dma_in_callback;
1335 sa_req.mdata_size = 44;
1336 sa_req.base = &req->base;
1337 sa_req.ctx = ctx;
1338
1339 return sa_run(&sa_req);
1340}
1341
1342static int sa_encrypt(struct skcipher_request *req)
1343{
1344 return sa_cipher_run(req, req->iv, 1);
1345}
1346
1347static int sa_decrypt(struct skcipher_request *req)
1348{
1349 return sa_cipher_run(req, req->iv, 0);
1350}
1351
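/*
 * DMA completion callback for hash requests: the digest is returned as
 * big-endian words in the descriptor metadata (starting at word 4) and is
 * copied into the request result buffer here.
 */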
1352static void sa_sha_dma_in_callback(void *data)
1353{
1354 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1355 struct ahash_request *req;
1356 struct crypto_ahash *tfm;
1357 unsigned int authsize;
1358 int i;
1359 size_t ml, pl;
1360 u32 *result;
1361 __be32 *mdptr;
1362
1363 sa_sync_from_device(rxd);
1364 req = container_of(rxd->req, struct ahash_request, base);
1365 tfm = crypto_ahash_reqtfm(req);
1366 authsize = crypto_ahash_digestsize(tfm);
1367
1368 mdptr = (__be32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1369 result = (u32 *)req->result;
1370
1371 for (i = 0; i < (authsize / 4); i++)
1372 result[i] = be32_to_cpu(mdptr[i + 4]);
1373
1374 sa_free_sa_rx_data(rxd);
1375
1376 ahash_request_complete(req, 0);
1377}
1378
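/*
 * Zero-length messages are not sent to the hardware; the precomputed
 * zero-message digests are returned directly instead.
 */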
1379static int zero_message_process(struct ahash_request *req)
1380{
1381 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1382 int sa_digest_size = crypto_ahash_digestsize(tfm);
1383
1384 switch (sa_digest_size) {
1385 case SHA1_DIGEST_SIZE:
1386 memcpy(req->result, sha1_zero_message_hash, sa_digest_size);
1387 break;
1388 case SHA256_DIGEST_SIZE:
1389 memcpy(req->result, sha256_zero_message_hash, sa_digest_size);
1390 break;
1391 case SHA512_DIGEST_SIZE:
1392 memcpy(req->result, sha512_zero_message_hash, sa_digest_size);
1393 break;
1394 default:
1395 return -EINVAL;
1396 }
1397
1398 return 0;
1399}
1400
1401static int sa_sha_run(struct ahash_request *req)
1402{
1403 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
1404 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1405 struct sa_req sa_req = { 0 };
1406 size_t auth_len;
1407
1408 auth_len = req->nbytes;
1409
1410 if (!auth_len)
1411 return zero_message_process(req);
1412
1413 if (auth_len > SA_MAX_DATA_SZ ||
1414 (auth_len >= SA_UNSAFE_DATA_SZ_MIN &&
1415 auth_len <= SA_UNSAFE_DATA_SZ_MAX)) {
1416 struct ahash_request *subreq = &rctx->fallback_req;
1417 int ret = 0;
1418
1419 ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1420 subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1421
1422 crypto_ahash_init(subreq);
1423
1424 subreq->nbytes = auth_len;
1425 subreq->src = req->src;
1426 subreq->result = req->result;
1427
1428 ret |= crypto_ahash_update(subreq);
1429
1430 subreq->nbytes = 0;
1431
1432 ret |= crypto_ahash_final(subreq);
1433
1434 return ret;
1435 }
1436
1437 sa_req.size = auth_len;
1438 sa_req.auth_size = auth_len;
1439 sa_req.src = req->src;
1440 sa_req.dst = req->src;
1441 sa_req.enc = true;
1442 sa_req.type = CRYPTO_ALG_TYPE_AHASH;
1443 sa_req.callback = sa_sha_dma_in_callback;
1444 sa_req.mdata_size = 28;
1445 sa_req.ctx = ctx;
1446 sa_req.base = &req->base;
1447
1448 return sa_run(&sa_req);
1449}
1450
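/*
 * Build the security context and command label template used for plain
 * (unkeyed) hash requests on this transform.
 */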
1451static int sa_sha_setup(struct sa_tfm_ctx *ctx, struct algo_data *ad)
1452{
1453 int bs = crypto_shash_blocksize(ctx->shash);
1454 int cmdl_len;
1455 struct sa_cmdl_cfg cfg;
1456
1457 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1458 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1459 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1460
1461 memset(ctx->authkey, 0, bs);
1462 memset(&cfg, 0, sizeof(cfg));
1463 cfg.aalg = ad->aalg_id;
1464 cfg.enc_eng_id = ad->enc_eng.eng_id;
1465 cfg.auth_eng_id = ad->auth_eng.eng_id;
1466 cfg.iv_size = 0;
1467 cfg.akey = NULL;
1468 cfg.akey_len = 0;
1469
1470 ctx->dev_data = dev_get_drvdata(sa_k3_dev);
1471 /* Setup Encryption Security Context & Command label template */
1472 if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, NULL, 0, NULL, 0,
1473 ad, 0, &ctx->enc.epib[1]))
1474 goto badkey;
1475
1476 cmdl_len = sa_format_cmdl_gen(&cfg,
1477 (u8 *)ctx->enc.cmdl,
1478 &ctx->enc.cmdl_upd_info);
1479 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1480 goto badkey;
1481
1482 ctx->enc.cmdl_size = cmdl_len;
1483
1484 return 0;
1485
1486badkey:
1487 dev_err(sa_k3_dev, "%s: badkey\n", __func__);
1488 return -EINVAL;
1489}
1490
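/*
 * Common ahash tfm init: allocate a software shash of the base algorithm and
 * an ahash fallback, and size the request context to hold the fallback
 * request.
 */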
1491static int sa_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
1492{
1493 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1494 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1495 int ret;
1496
1497 memset(ctx, 0, sizeof(*ctx));
1498 ctx->dev_data = data;
1499 ret = sa_init_ctx_info(&ctx->enc, data);
1500 if (ret)
1501 return ret;
1502
1503 if (alg_base) {
1504 ctx->shash = crypto_alloc_shash(alg_base, 0,
1505 CRYPTO_ALG_NEED_FALLBACK);
1506 if (IS_ERR(ctx->shash)) {
1507 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n",
1508 alg_base);
1509 return PTR_ERR(ctx->shash);
1510 }
1511 /* for fallback */
1512 ctx->fallback.ahash =
1513 crypto_alloc_ahash(alg_base, 0,
1514 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fallback.ahash)) {
			dev_err(ctx->dev_data->dev,
				"Could not load fallback driver\n");
			crypto_free_shash(ctx->shash);
			return PTR_ERR(ctx->fallback.ahash);
		}
1520 }
1521
1522 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1523 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1524 ctx->dec.sc_id, &ctx->dec.sc_phys);
1525
1526 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1527 sizeof(struct sa_sha_req_ctx) +
1528 crypto_ahash_reqsize(ctx->fallback.ahash));
1529
1530 return 0;
1531}
1532
1533static int sa_sha_digest(struct ahash_request *req)
1534{
1535 return sa_sha_run(req);
1536}
1537
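/*
 * The incremental hash operations (init/update/final/finup/export/import)
 * are delegated to the software fallback; only digest() uses the hardware
 * path via sa_sha_run().
 */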
1538static int sa_sha_init(struct ahash_request *req)
1539{
1540 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1541 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1542 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1543
1544 dev_dbg(sa_k3_dev, "init: digest size: %u, rctx=%p\n",
1545 crypto_ahash_digestsize(tfm), rctx);
1546
1547 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1548 rctx->fallback_req.base.flags =
1549 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1550
1551 return crypto_ahash_init(&rctx->fallback_req);
1552}
1553
1554static int sa_sha_update(struct ahash_request *req)
1555{
1556 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1557 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1558 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1559
1560 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1561 rctx->fallback_req.base.flags =
1562 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1563 rctx->fallback_req.nbytes = req->nbytes;
1564 rctx->fallback_req.src = req->src;
1565
1566 return crypto_ahash_update(&rctx->fallback_req);
1567}
1568
1569static int sa_sha_final(struct ahash_request *req)
1570{
1571 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1572 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1573 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1574
1575 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1576 rctx->fallback_req.base.flags =
1577 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1578 rctx->fallback_req.result = req->result;
1579
1580 return crypto_ahash_final(&rctx->fallback_req);
1581}
1582
1583static int sa_sha_finup(struct ahash_request *req)
1584{
1585 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1586 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1587 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1588
1589 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1590 rctx->fallback_req.base.flags =
1591 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1592
1593 rctx->fallback_req.nbytes = req->nbytes;
1594 rctx->fallback_req.src = req->src;
1595 rctx->fallback_req.result = req->result;
1596
1597 return crypto_ahash_finup(&rctx->fallback_req);
1598}
1599
1600static int sa_sha_import(struct ahash_request *req, const void *in)
1601{
1602 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1603 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1604 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1605
1606 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback.ahash);
1607 rctx->fallback_req.base.flags = req->base.flags &
1608 CRYPTO_TFM_REQ_MAY_SLEEP;
1609
1610 return crypto_ahash_import(&rctx->fallback_req, in);
1611}
1612
1613static int sa_sha_export(struct ahash_request *req, void *out)
1614{
1615 struct sa_sha_req_ctx *rctx = ahash_request_ctx(req);
1616 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1617 struct sa_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
1618 struct ahash_request *subreq = &rctx->fallback_req;
1619
1620 ahash_request_set_tfm(subreq, ctx->fallback.ahash);
1621 subreq->base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
1622
1623 return crypto_ahash_export(subreq, out);
1624}
1625
1626static int sa_sha1_cra_init(struct crypto_tfm *tfm)
1627{
1628 struct algo_data ad = { 0 };
1629 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1630
1631 sa_sha_cra_init_alg(tfm, "sha1");
1632
1633 ad.aalg_id = SA_AALG_ID_SHA1;
1634 ad.hash_size = SHA1_DIGEST_SIZE;
1635 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1636
1637 sa_sha_setup(ctx, &ad);
1638
1639 return 0;
1640}
1641
1642static int sa_sha256_cra_init(struct crypto_tfm *tfm)
1643{
1644 struct algo_data ad = { 0 };
1645 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1646
1647 sa_sha_cra_init_alg(tfm, "sha256");
1648
1649 ad.aalg_id = SA_AALG_ID_SHA2_256;
1650 ad.hash_size = SHA256_DIGEST_SIZE;
1651 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1652
1653 sa_sha_setup(ctx, &ad);
1654
1655 return 0;
1656}
1657
1658static int sa_sha512_cra_init(struct crypto_tfm *tfm)
1659{
1660 struct algo_data ad = { 0 };
1661 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1662
1663 sa_sha_cra_init_alg(tfm, "sha512");
1664
1665 ad.aalg_id = SA_AALG_ID_SHA2_512;
1666 ad.hash_size = SHA512_DIGEST_SIZE;
1667 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA512;
1668
1669 sa_sha_setup(ctx, &ad);
1670
1671 return 0;
1672}
1673
1674static void sa_sha_cra_exit(struct crypto_tfm *tfm)
1675{
1676 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
1677 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1678
1679 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1680 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1681 ctx->dec.sc_id, &ctx->dec.sc_phys);
1682
1683 if (crypto_tfm_alg_type(tfm) == CRYPTO_ALG_TYPE_AHASH)
1684 sa_free_ctx_info(&ctx->enc, data);
1685
1686 crypto_free_shash(ctx->shash);
1687 crypto_free_ahash(ctx->fallback.ahash);
1688}
1689
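/*
 * DMA completion callback for AEAD requests: on encryption the computed tag
 * is copied to the end of the destination buffer, on decryption it is
 * compared against the tag carried at the end of the source data.
 */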
1690static void sa_aead_dma_in_callback(void *data)
1691{
1692 struct sa_rx_data *rxd = (struct sa_rx_data *)data;
1693 struct aead_request *req;
1694 struct crypto_aead *tfm;
1695 unsigned int start;
1696 unsigned int authsize;
1697 u8 auth_tag[SA_MAX_AUTH_TAG_SZ];
1698 size_t pl, ml;
1699 int i;
1700 int err = 0;
1701 u32 *mdptr;
1702
1703 sa_sync_from_device(rxd);
1704 req = container_of(rxd->req, struct aead_request, base);
1705 tfm = crypto_aead_reqtfm(req);
1706 start = req->assoclen + req->cryptlen;
1707 authsize = crypto_aead_authsize(tfm);
1708
1709 mdptr = (u32 *)dmaengine_desc_get_metadata_ptr(rxd->tx_in, &pl, &ml);
1710 for (i = 0; i < (authsize / 4); i++)
1711 mdptr[i + 4] = swab32(mdptr[i + 4]);
1712
1713 if (rxd->enc) {
1714 scatterwalk_map_and_copy(&mdptr[4], req->dst, start, authsize,
1715 1);
1716 } else {
1717 start -= authsize;
1718 scatterwalk_map_and_copy(auth_tag, req->src, start, authsize,
1719 0);
1720
1721 err = memcmp(&mdptr[4], auth_tag, authsize) ? -EBADMSG : 0;
1722 }
1723
1724 sa_free_sa_rx_data(rxd);
1725
1726 aead_request_complete(req, err);
1727}
1728
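/*
 * Common AEAD tfm init: allocate the base hash shash and the AEAD fallback,
 * size the request context for the fallback and set up the encryption and
 * decryption context info.
 */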
1729static int sa_cra_init_aead(struct crypto_aead *tfm, const char *hash,
1730 const char *fallback)
1731{
1732 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1733 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1734 int ret;
1735
1736 memzero_explicit(ctx, sizeof(*ctx));
1737 ctx->dev_data = data;
1738
1739 ctx->shash = crypto_alloc_shash(hash, 0, CRYPTO_ALG_NEED_FALLBACK);
1740 if (IS_ERR(ctx->shash)) {
1741 dev_err(sa_k3_dev, "base driver %s couldn't be loaded\n", hash);
1742 return PTR_ERR(ctx->shash);
1743 }
1744
1745 ctx->fallback.aead = crypto_alloc_aead(fallback, 0,
1746 CRYPTO_ALG_NEED_FALLBACK);
1747
1748 if (IS_ERR(ctx->fallback.aead)) {
1749 dev_err(sa_k3_dev, "fallback driver %s couldn't be loaded\n",
1750 fallback);
1751 return PTR_ERR(ctx->fallback.aead);
1752 }
1753
1754 crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
1755 crypto_aead_reqsize(ctx->fallback.aead));
1756
1757 ret = sa_init_ctx_info(&ctx->enc, data);
1758 if (ret)
1759 return ret;
1760
1761 ret = sa_init_ctx_info(&ctx->dec, data);
1762 if (ret) {
1763 sa_free_ctx_info(&ctx->enc, data);
1764 return ret;
1765 }
1766
1767 dev_dbg(sa_k3_dev, "%s(0x%p) sc-ids(0x%x(0x%pad), 0x%x(0x%pad))\n",
1768 __func__, tfm, ctx->enc.sc_id, &ctx->enc.sc_phys,
1769 ctx->dec.sc_id, &ctx->dec.sc_phys);
1770
1771 return ret;
1772}
1773
1774static int sa_cra_init_aead_sha1(struct crypto_aead *tfm)
1775{
1776 return sa_cra_init_aead(tfm, "sha1",
1777 "authenc(hmac(sha1-ce),cbc(aes-ce))");
1778}
1779
1780static int sa_cra_init_aead_sha256(struct crypto_aead *tfm)
1781{
1782 return sa_cra_init_aead(tfm, "sha256",
1783 "authenc(hmac(sha256-ce),cbc(aes-ce))");
1784}
1785
1786static void sa_exit_tfm_aead(struct crypto_aead *tfm)
1787{
1788 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1789 struct sa_crypto_data *data = dev_get_drvdata(sa_k3_dev);
1790
1791 crypto_free_shash(ctx->shash);
1792 crypto_free_aead(ctx->fallback.aead);
1793
1794 sa_free_ctx_info(&ctx->enc, data);
1795 sa_free_ctx_info(&ctx->dec, data);
1796}
1797
1798/* AEAD algorithm configuration interface function */
1799static int sa_aead_setkey(struct crypto_aead *authenc,
1800 const u8 *key, unsigned int keylen,
1801 struct algo_data *ad)
1802{
1803 struct sa_tfm_ctx *ctx = crypto_aead_ctx(authenc);
1804 struct crypto_authenc_keys keys;
1805 int cmdl_len;
1806 struct sa_cmdl_cfg cfg;
1807 int key_idx;
1808
1809 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
1810 return -EINVAL;
1811
1812 /* Convert the key size (16/24/32) to the key size index (0/1/2) */
1813 key_idx = (keys.enckeylen >> 3) - 2;
1814 if (key_idx >= 3)
1815 return -EINVAL;
1816
1817 ad->ctx = ctx;
1818 ad->enc_eng.eng_id = SA_ENG_ID_EM1;
1819 ad->enc_eng.sc_size = SA_CTX_ENC_TYPE1_SZ;
1820 ad->auth_eng.eng_id = SA_ENG_ID_AM1;
1821 ad->auth_eng.sc_size = SA_CTX_AUTH_TYPE2_SZ;
1822 ad->mci_enc = mci_cbc_enc_no_iv_array[key_idx];
1823 ad->mci_dec = mci_cbc_dec_no_iv_array[key_idx];
1824 ad->inv_key = true;
1825 ad->keyed_mac = true;
1826 ad->ealg_id = SA_EALG_ID_AES_CBC;
1827 ad->prep_iopad = sa_prepare_iopads;
1828
1829 memset(&cfg, 0, sizeof(cfg));
1830 cfg.enc = true;
1831 cfg.aalg = ad->aalg_id;
1832 cfg.enc_eng_id = ad->enc_eng.eng_id;
1833 cfg.auth_eng_id = ad->auth_eng.eng_id;
1834 cfg.iv_size = crypto_aead_ivsize(authenc);
1835 cfg.akey = keys.authkey;
1836 cfg.akey_len = keys.authkeylen;
1837
1838 /* Setup Encryption Security Context & Command label template */
1839 if (sa_init_sc(&ctx->enc, ctx->dev_data->match_data, keys.enckey,
1840 keys.enckeylen, keys.authkey, keys.authkeylen,
1841 ad, 1, &ctx->enc.epib[1]))
1842 return -EINVAL;
1843
1844 cmdl_len = sa_format_cmdl_gen(&cfg,
1845 (u8 *)ctx->enc.cmdl,
1846 &ctx->enc.cmdl_upd_info);
1847 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1848 return -EINVAL;
1849
1850 ctx->enc.cmdl_size = cmdl_len;
1851
1852 /* Setup Decryption Security Context & Command label template */
1853 if (sa_init_sc(&ctx->dec, ctx->dev_data->match_data, keys.enckey,
1854 keys.enckeylen, keys.authkey, keys.authkeylen,
1855 ad, 0, &ctx->dec.epib[1]))
1856 return -EINVAL;
1857
1858 cfg.enc = false;
1859 cmdl_len = sa_format_cmdl_gen(&cfg, (u8 *)ctx->dec.cmdl,
1860 &ctx->dec.cmdl_upd_info);
1861
1862 if (cmdl_len <= 0 || (cmdl_len > SA_MAX_CMDL_WORDS * sizeof(u32)))
1863 return -EINVAL;
1864
1865 ctx->dec.cmdl_size = cmdl_len;
1866
1867 crypto_aead_clear_flags(ctx->fallback.aead, CRYPTO_TFM_REQ_MASK);
1868 crypto_aead_set_flags(ctx->fallback.aead,
1869 crypto_aead_get_flags(authenc) &
1870 CRYPTO_TFM_REQ_MASK);
	return crypto_aead_setkey(ctx->fallback.aead, key, keylen);
1874}
1875
1876static int sa_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
1877{
1878 struct sa_tfm_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));
1879
1880 return crypto_aead_setauthsize(ctx->fallback.aead, authsize);
1881}
1882
1883static int sa_aead_cbc_sha1_setkey(struct crypto_aead *authenc,
1884 const u8 *key, unsigned int keylen)
1885{
1886 struct algo_data ad = { 0 };
1887
1888 ad.ealg_id = SA_EALG_ID_AES_CBC;
1889 ad.aalg_id = SA_AALG_ID_HMAC_SHA1;
1890 ad.hash_size = SHA1_DIGEST_SIZE;
1891 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA1;
1892
1893 return sa_aead_setkey(authenc, key, keylen, &ad);
1894}
1895
1896static int sa_aead_cbc_sha256_setkey(struct crypto_aead *authenc,
1897 const u8 *key, unsigned int keylen)
1898{
1899 struct algo_data ad = { 0 };
1900
1901 ad.ealg_id = SA_EALG_ID_AES_CBC;
1902 ad.aalg_id = SA_AALG_ID_HMAC_SHA2_256;
1903 ad.hash_size = SHA256_DIGEST_SIZE;
1904 ad.auth_ctrl = SA_AUTH_SW_CTRL_SHA256;
1905
1906 return sa_aead_setkey(authenc, key, keylen, &ad);
1907}
1908
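/*
 * Common AEAD handler: sizes the hardware cannot handle are passed to the
 * software fallback, otherwise a struct sa_req covering the associated data
 * and payload is built for sa_run().
 */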
1909static int sa_aead_run(struct aead_request *req, u8 *iv, int enc)
1910{
1911 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1912 struct sa_tfm_ctx *ctx = crypto_aead_ctx(tfm);
1913 struct sa_req sa_req = { 0 };
1914 size_t auth_size, enc_size;
1915
1916 enc_size = req->cryptlen;
1917 auth_size = req->assoclen + req->cryptlen;
1918
1919 if (!enc) {
1920 enc_size -= crypto_aead_authsize(tfm);
1921 auth_size -= crypto_aead_authsize(tfm);
1922 }
1923
1924 if (auth_size > SA_MAX_DATA_SZ ||
1925 (auth_size >= SA_UNSAFE_DATA_SZ_MIN &&
1926 auth_size <= SA_UNSAFE_DATA_SZ_MAX)) {
1927 struct aead_request *subreq = aead_request_ctx(req);
1928 int ret;
1929
1930 aead_request_set_tfm(subreq, ctx->fallback.aead);
1931 aead_request_set_callback(subreq, req->base.flags,
1932 req->base.complete, req->base.data);
1933 aead_request_set_crypt(subreq, req->src, req->dst,
1934 req->cryptlen, req->iv);
1935 aead_request_set_ad(subreq, req->assoclen);
1936
1937 ret = enc ? crypto_aead_encrypt(subreq) :
1938 crypto_aead_decrypt(subreq);
1939 return ret;
1940 }
1941
1942 sa_req.enc_offset = req->assoclen;
1943 sa_req.enc_size = enc_size;
1944 sa_req.auth_size = auth_size;
1945 sa_req.size = auth_size;
1946 sa_req.enc_iv = iv;
1947 sa_req.type = CRYPTO_ALG_TYPE_AEAD;
1948 sa_req.enc = enc;
1949 sa_req.callback = sa_aead_dma_in_callback;
1950 sa_req.mdata_size = 52;
1951 sa_req.base = &req->base;
1952 sa_req.ctx = ctx;
1953 sa_req.src = req->src;
1954 sa_req.dst = req->dst;
1955
1956 return sa_run(&sa_req);
1957}
1958
1959/* AEAD algorithm encrypt interface function */
1960static int sa_aead_encrypt(struct aead_request *req)
1961{
1962 return sa_aead_run(req, req->iv, 1);
1963}
1964
1965/* AEAD algorithm decrypt interface function */
1966static int sa_aead_decrypt(struct aead_request *req)
1967{
1968 return sa_aead_run(req, req->iv, 0);
1969}
1970
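/*
 * Algorithm templates; only the entries enabled in the per-SoC match data
 * are registered with the crypto framework.
 */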
1971static struct sa_alg_tmpl sa_algs[] = {
1972 [SA_ALG_CBC_AES] = {
1973 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1974 .alg.skcipher = {
1975 .base.cra_name = "cbc(aes)",
1976 .base.cra_driver_name = "cbc-aes-sa2ul",
1977 .base.cra_priority = 30000,
1978 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
1979 CRYPTO_ALG_KERN_DRIVER_ONLY |
1980 CRYPTO_ALG_ASYNC |
1981 CRYPTO_ALG_NEED_FALLBACK,
1982 .base.cra_blocksize = AES_BLOCK_SIZE,
1983 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
1984 .base.cra_module = THIS_MODULE,
1985 .init = sa_cipher_cra_init,
1986 .exit = sa_cipher_cra_exit,
1987 .min_keysize = AES_MIN_KEY_SIZE,
1988 .max_keysize = AES_MAX_KEY_SIZE,
1989 .ivsize = AES_BLOCK_SIZE,
1990 .setkey = sa_aes_cbc_setkey,
1991 .encrypt = sa_encrypt,
1992 .decrypt = sa_decrypt,
1993 }
1994 },
1995 [SA_ALG_EBC_AES] = {
1996 .type = CRYPTO_ALG_TYPE_SKCIPHER,
1997 .alg.skcipher = {
1998 .base.cra_name = "ecb(aes)",
1999 .base.cra_driver_name = "ecb-aes-sa2ul",
2000 .base.cra_priority = 30000,
2001 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2002 CRYPTO_ALG_KERN_DRIVER_ONLY |
2003 CRYPTO_ALG_ASYNC |
2004 CRYPTO_ALG_NEED_FALLBACK,
2005 .base.cra_blocksize = AES_BLOCK_SIZE,
2006 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2007 .base.cra_module = THIS_MODULE,
2008 .init = sa_cipher_cra_init,
2009 .exit = sa_cipher_cra_exit,
2010 .min_keysize = AES_MIN_KEY_SIZE,
2011 .max_keysize = AES_MAX_KEY_SIZE,
2012 .setkey = sa_aes_ecb_setkey,
2013 .encrypt = sa_encrypt,
2014 .decrypt = sa_decrypt,
2015 }
2016 },
2017 [SA_ALG_CBC_DES3] = {
2018 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2019 .alg.skcipher = {
2020 .base.cra_name = "cbc(des3_ede)",
2021 .base.cra_driver_name = "cbc-des3-sa2ul",
2022 .base.cra_priority = 30000,
2023 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2024 CRYPTO_ALG_KERN_DRIVER_ONLY |
2025 CRYPTO_ALG_ASYNC |
2026 CRYPTO_ALG_NEED_FALLBACK,
2027 .base.cra_blocksize = DES_BLOCK_SIZE,
2028 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2029 .base.cra_module = THIS_MODULE,
2030 .init = sa_cipher_cra_init,
2031 .exit = sa_cipher_cra_exit,
2032 .min_keysize = 3 * DES_KEY_SIZE,
2033 .max_keysize = 3 * DES_KEY_SIZE,
2034 .ivsize = DES_BLOCK_SIZE,
2035 .setkey = sa_3des_cbc_setkey,
2036 .encrypt = sa_encrypt,
2037 .decrypt = sa_decrypt,
2038 }
2039 },
2040 [SA_ALG_ECB_DES3] = {
2041 .type = CRYPTO_ALG_TYPE_SKCIPHER,
2042 .alg.skcipher = {
2043 .base.cra_name = "ecb(des3_ede)",
2044 .base.cra_driver_name = "ecb-des3-sa2ul",
2045 .base.cra_priority = 30000,
2046 .base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
2047 CRYPTO_ALG_KERN_DRIVER_ONLY |
2048 CRYPTO_ALG_ASYNC |
2049 CRYPTO_ALG_NEED_FALLBACK,
2050 .base.cra_blocksize = DES_BLOCK_SIZE,
2051 .base.cra_ctxsize = sizeof(struct sa_tfm_ctx),
2052 .base.cra_module = THIS_MODULE,
2053 .init = sa_cipher_cra_init,
2054 .exit = sa_cipher_cra_exit,
2055 .min_keysize = 3 * DES_KEY_SIZE,
2056 .max_keysize = 3 * DES_KEY_SIZE,
2057 .setkey = sa_3des_ecb_setkey,
2058 .encrypt = sa_encrypt,
2059 .decrypt = sa_decrypt,
2060 }
2061 },
2062 [SA_ALG_SHA1] = {
2063 .type = CRYPTO_ALG_TYPE_AHASH,
2064 .alg.ahash = {
2065 .halg.base = {
2066 .cra_name = "sha1",
2067 .cra_driver_name = "sha1-sa2ul",
2068 .cra_priority = 400,
2069 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2070 CRYPTO_ALG_ASYNC |
2071 CRYPTO_ALG_KERN_DRIVER_ONLY |
2072 CRYPTO_ALG_NEED_FALLBACK,
2073 .cra_blocksize = SHA1_BLOCK_SIZE,
2074 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2075 .cra_module = THIS_MODULE,
2076 .cra_init = sa_sha1_cra_init,
2077 .cra_exit = sa_sha_cra_exit,
2078 },
2079 .halg.digestsize = SHA1_DIGEST_SIZE,
2080 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2081 sizeof(struct sha1_state),
2082 .init = sa_sha_init,
2083 .update = sa_sha_update,
2084 .final = sa_sha_final,
2085 .finup = sa_sha_finup,
2086 .digest = sa_sha_digest,
2087 .export = sa_sha_export,
2088 .import = sa_sha_import,
2089 },
2090 },
2091 [SA_ALG_SHA256] = {
2092 .type = CRYPTO_ALG_TYPE_AHASH,
2093 .alg.ahash = {
2094 .halg.base = {
2095 .cra_name = "sha256",
2096 .cra_driver_name = "sha256-sa2ul",
2097 .cra_priority = 400,
2098 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2099 CRYPTO_ALG_ASYNC |
2100 CRYPTO_ALG_KERN_DRIVER_ONLY |
2101 CRYPTO_ALG_NEED_FALLBACK,
2102 .cra_blocksize = SHA256_BLOCK_SIZE,
2103 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2104 .cra_module = THIS_MODULE,
2105 .cra_init = sa_sha256_cra_init,
2106 .cra_exit = sa_sha_cra_exit,
2107 },
2108 .halg.digestsize = SHA256_DIGEST_SIZE,
2109 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2110 sizeof(struct sha256_state),
2111 .init = sa_sha_init,
2112 .update = sa_sha_update,
2113 .final = sa_sha_final,
2114 .finup = sa_sha_finup,
2115 .digest = sa_sha_digest,
2116 .export = sa_sha_export,
2117 .import = sa_sha_import,
2118 },
2119 },
2120 [SA_ALG_SHA512] = {
2121 .type = CRYPTO_ALG_TYPE_AHASH,
2122 .alg.ahash = {
2123 .halg.base = {
2124 .cra_name = "sha512",
2125 .cra_driver_name = "sha512-sa2ul",
2126 .cra_priority = 400,
2127 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2128 CRYPTO_ALG_ASYNC |
2129 CRYPTO_ALG_KERN_DRIVER_ONLY |
2130 CRYPTO_ALG_NEED_FALLBACK,
2131 .cra_blocksize = SHA512_BLOCK_SIZE,
2132 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2133 .cra_module = THIS_MODULE,
2134 .cra_init = sa_sha512_cra_init,
2135 .cra_exit = sa_sha_cra_exit,
2136 },
2137 .halg.digestsize = SHA512_DIGEST_SIZE,
2138 .halg.statesize = sizeof(struct sa_sha_req_ctx) +
2139 sizeof(struct sha512_state),
2140 .init = sa_sha_init,
2141 .update = sa_sha_update,
2142 .final = sa_sha_final,
2143 .finup = sa_sha_finup,
2144 .digest = sa_sha_digest,
2145 .export = sa_sha_export,
2146 .import = sa_sha_import,
2147 },
2148 },
2149 [SA_ALG_AUTHENC_SHA1_AES] = {
2150 .type = CRYPTO_ALG_TYPE_AEAD,
2151 .alg.aead = {
2152 .base = {
2153 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2154 .cra_driver_name =
2155 "authenc(hmac(sha1),cbc(aes))-sa2ul",
2156 .cra_blocksize = AES_BLOCK_SIZE,
2157 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2158 CRYPTO_ALG_KERN_DRIVER_ONLY |
2159 CRYPTO_ALG_ASYNC |
2160 CRYPTO_ALG_NEED_FALLBACK,
2161 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2162 .cra_module = THIS_MODULE,
2163 .cra_priority = 3000,
2164 },
2165 .ivsize = AES_BLOCK_SIZE,
2166 .maxauthsize = SHA1_DIGEST_SIZE,
2167
2168 .init = sa_cra_init_aead_sha1,
2169 .exit = sa_exit_tfm_aead,
2170 .setkey = sa_aead_cbc_sha1_setkey,
2171 .setauthsize = sa_aead_setauthsize,
2172 .encrypt = sa_aead_encrypt,
2173 .decrypt = sa_aead_decrypt,
2174 },
2175 },
2176 [SA_ALG_AUTHENC_SHA256_AES] = {
2177 .type = CRYPTO_ALG_TYPE_AEAD,
2178 .alg.aead = {
2179 .base = {
2180 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2181 .cra_driver_name =
2182 "authenc(hmac(sha256),cbc(aes))-sa2ul",
2183 .cra_blocksize = AES_BLOCK_SIZE,
2184 .cra_flags = CRYPTO_ALG_TYPE_AEAD |
2185 CRYPTO_ALG_KERN_DRIVER_ONLY |
2186 CRYPTO_ALG_ASYNC |
2187 CRYPTO_ALG_NEED_FALLBACK,
2188 .cra_ctxsize = sizeof(struct sa_tfm_ctx),
2189 .cra_module = THIS_MODULE,
2190 .cra_alignmask = 0,
2191 .cra_priority = 3000,
2192 },
2193 .ivsize = AES_BLOCK_SIZE,
2194 .maxauthsize = SHA256_DIGEST_SIZE,
2195
2196 .init = sa_cra_init_aead_sha256,
2197 .exit = sa_exit_tfm_aead,
2198 .setkey = sa_aead_cbc_sha256_setkey,
2199 .setauthsize = sa_aead_setauthsize,
2200 .encrypt = sa_aead_encrypt,
2201 .decrypt = sa_aead_decrypt,
2202 },
2203 },
2204};
2205
/* Register the supported algorithms with the crypto framework */
2207static void sa_register_algos(struct sa_crypto_data *dev_data)
2208{
2209 const struct sa_match_data *match_data = dev_data->match_data;
2210 struct device *dev = dev_data->dev;
2211 char *alg_name;
2212 u32 type;
2213 int i, err;
2214
2215 for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2216 /* Skip unsupported algos */
2217 if (!(match_data->supported_algos & BIT(i)))
2218 continue;
2219
2220 type = sa_algs[i].type;
2221 if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
2222 alg_name = sa_algs[i].alg.skcipher.base.cra_name;
2223 err = crypto_register_skcipher(&sa_algs[i].alg.skcipher);
2224 } else if (type == CRYPTO_ALG_TYPE_AHASH) {
2225 alg_name = sa_algs[i].alg.ahash.halg.base.cra_name;
2226 err = crypto_register_ahash(&sa_algs[i].alg.ahash);
2227 } else if (type == CRYPTO_ALG_TYPE_AEAD) {
2228 alg_name = sa_algs[i].alg.aead.base.cra_name;
2229 err = crypto_register_aead(&sa_algs[i].alg.aead);
2230 } else {
			dev_err(dev,
				"unsupported crypto algorithm (%d)\n",
				sa_algs[i].type);
2234 continue;
2235 }
2236
2237 if (err)
2238 dev_err(dev, "Failed to register '%s'\n", alg_name);
2239 else
2240 sa_algs[i].registered = true;
2241 }
2242}
2243
/* Unregister the algorithms from the crypto framework */
2245static void sa_unregister_algos(const struct device *dev)
2246{
2247 u32 type;
2248 int i;
2249
2250 for (i = 0; i < ARRAY_SIZE(sa_algs); i++) {
2251 type = sa_algs[i].type;
2252 if (!sa_algs[i].registered)
2253 continue;
2254 if (type == CRYPTO_ALG_TYPE_SKCIPHER)
2255 crypto_unregister_skcipher(&sa_algs[i].alg.skcipher);
2256 else if (type == CRYPTO_ALG_TYPE_AHASH)
2257 crypto_unregister_ahash(&sa_algs[i].alg.ahash);
2258 else if (type == CRYPTO_ALG_TYPE_AEAD)
2259 crypto_unregister_aead(&sa_algs[i].alg.aead);
2260
2261 sa_algs[i].registered = false;
2262 }
2263}
2264
2265static int sa_init_mem(struct sa_crypto_data *dev_data)
2266{
2267 struct device *dev = &dev_data->pdev->dev;
2268 /* Setup dma pool for security context buffers */
2269 dev_data->sc_pool = dma_pool_create("keystone-sc", dev,
2270 SA_CTX_MAX_SZ, 64, 0);
2271 if (!dev_data->sc_pool) {
		dev_err(dev, "Failed to create dma pool\n");
2273 return -ENOMEM;
2274 }
2275
2276 return 0;
2277}
2278
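/*
 * Acquire and configure the TX channel and the two RX channels used to move
 * data to and from the SA2UL engine.
 */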
2279static int sa_dma_init(struct sa_crypto_data *dd)
2280{
2281 int ret;
2282 struct dma_slave_config cfg;
2283
2284 dd->dma_rx1 = NULL;
2285 dd->dma_tx = NULL;
2286 dd->dma_rx2 = NULL;
2287
2288 ret = dma_coerce_mask_and_coherent(dd->dev, DMA_BIT_MASK(48));
2289 if (ret)
2290 return ret;
2291
2292 dd->dma_rx1 = dma_request_chan(dd->dev, "rx1");
2293 if (IS_ERR(dd->dma_rx1))
2294 return dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx1),
2295 "Unable to request rx1 DMA channel\n");
2296
2297 dd->dma_rx2 = dma_request_chan(dd->dev, "rx2");
2298 if (IS_ERR(dd->dma_rx2)) {
2299 ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_rx2),
2300 "Unable to request rx2 DMA channel\n");
2301 goto err_dma_rx2;
2302 }
2303
2304 dd->dma_tx = dma_request_chan(dd->dev, "tx");
2305 if (IS_ERR(dd->dma_tx)) {
2306 ret = dev_err_probe(dd->dev, PTR_ERR(dd->dma_tx),
2307 "Unable to request tx DMA channel\n");
2308 goto err_dma_tx;
2309 }
2310
2311 memzero_explicit(&cfg, sizeof(cfg));
2312
2313 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2314 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
2315 cfg.src_maxburst = 4;
2316 cfg.dst_maxburst = 4;
2317
2318 ret = dmaengine_slave_config(dd->dma_rx1, &cfg);
2319 if (ret) {
2320 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2321 ret);
2322 goto err_dma_config;
2323 }
2324
2325 ret = dmaengine_slave_config(dd->dma_rx2, &cfg);
2326 if (ret) {
2327 dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
2328 ret);
2329 goto err_dma_config;
2330 }
2331
2332 ret = dmaengine_slave_config(dd->dma_tx, &cfg);
2333 if (ret) {
2334 dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
2335 ret);
2336 goto err_dma_config;
2337 }
2338
2339 return 0;
2340
2341err_dma_config:
2342 dma_release_channel(dd->dma_tx);
2343err_dma_tx:
2344 dma_release_channel(dd->dma_rx2);
2345err_dma_rx2:
2346 dma_release_channel(dd->dma_rx1);
2347
2348 return ret;
2349}
2350
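/* Add a device link making each SA2UL child device a consumer of the parent */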
2351static int sa_link_child(struct device *dev, void *data)
2352{
2353 struct device *parent = data;
2354
2355 device_link_add(dev, parent, DL_FLAG_AUTOPROBE_CONSUMER);
2356
2357 return 0;
2358}
2359
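/* Per-SoC capabilities and the bitmask of algorithms to register */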
2360static struct sa_match_data am654_match_data = {
2361 .priv = 1,
2362 .priv_id = 1,
2363 .supported_algos = BIT(SA_ALG_CBC_AES) |
2364 BIT(SA_ALG_EBC_AES) |
2365 BIT(SA_ALG_CBC_DES3) |
2366 BIT(SA_ALG_ECB_DES3) |
2367 BIT(SA_ALG_SHA1) |
2368 BIT(SA_ALG_SHA256) |
2369 BIT(SA_ALG_SHA512) |
2370 BIT(SA_ALG_AUTHENC_SHA1_AES) |
2371 BIT(SA_ALG_AUTHENC_SHA256_AES),
2372};
2373
2374static struct sa_match_data am64_match_data = {
2375 .priv = 0,
2376 .priv_id = 0,
2377 .supported_algos = BIT(SA_ALG_CBC_AES) |
2378 BIT(SA_ALG_EBC_AES) |
2379 BIT(SA_ALG_SHA256) |
2380 BIT(SA_ALG_SHA512) |
2381 BIT(SA_ALG_AUTHENC_SHA256_AES),
2382};
2383
2384static const struct of_device_id of_match[] = {
2385 { .compatible = "ti,j721e-sa2ul", .data = &am654_match_data, },
2386 { .compatible = "ti,am654-sa2ul", .data = &am654_match_data, },
2387 { .compatible = "ti,am64-sa2ul", .data = &am64_match_data, },
2388 { .compatible = "ti,am62-sa3ul", .data = &am64_match_data, },
2389 {},
2390};
2391MODULE_DEVICE_TABLE(of, of_match);
2392
2393static int sa_ul_probe(struct platform_device *pdev)
2394{
2395 struct device *dev = &pdev->dev;
2396 struct device_node *node = dev->of_node;
	void __iomem *saul_base;
2398 struct sa_crypto_data *dev_data;
2399 u32 status, val;
2400 int ret;
2401
2402 dev_data = devm_kzalloc(dev, sizeof(*dev_data), GFP_KERNEL);
2403 if (!dev_data)
2404 return -ENOMEM;
2405
2406 dev_data->match_data = of_device_get_match_data(dev);
2407 if (!dev_data->match_data)
2408 return -ENODEV;
2409
2410 saul_base = devm_platform_ioremap_resource(pdev, 0);
2411 if (IS_ERR(saul_base))
2412 return PTR_ERR(saul_base);
2413
2414 sa_k3_dev = dev;
2415 dev_data->dev = dev;
2416 dev_data->pdev = pdev;
2417 dev_data->base = saul_base;
2418 platform_set_drvdata(pdev, dev_data);
2419 dev_set_drvdata(sa_k3_dev, dev_data);
2420
2421 pm_runtime_enable(dev);
2422 ret = pm_runtime_resume_and_get(dev);
2423 if (ret < 0) {
2424 dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
2425 pm_runtime_disable(dev);
2426 return ret;
2427 }
2428
	ret = sa_init_mem(dev_data);
	if (ret)
		goto destroy_dma_pool;

2430 ret = sa_dma_init(dev_data);
2431 if (ret)
2432 goto destroy_dma_pool;
2433
2434 spin_lock_init(&dev_data->scid_lock);
2435
2436 val = SA_EEC_ENCSS_EN | SA_EEC_AUTHSS_EN | SA_EEC_CTXCACH_EN |
2437 SA_EEC_CPPI_PORT_IN_EN | SA_EEC_CPPI_PORT_OUT_EN |
2438 SA_EEC_TRNG_EN;
2439 status = readl_relaxed(saul_base + SA_ENGINE_STATUS);
	/* Enable the engines only if at least one of them is not already enabled */
2441 if (val & ~status)
2442 writel_relaxed(val, saul_base + SA_ENGINE_ENABLE_CONTROL);
2443
2444 sa_register_algos(dev_data);
2445
2446 ret = of_platform_populate(node, NULL, NULL, dev);
2447 if (ret)
2448 goto release_dma;
2449
2450 device_for_each_child(dev, dev, sa_link_child);
2451
2452 return 0;
2453
2454release_dma:
2455 sa_unregister_algos(dev);
2456
2457 dma_release_channel(dev_data->dma_rx2);
2458 dma_release_channel(dev_data->dma_rx1);
2459 dma_release_channel(dev_data->dma_tx);
2460
2461destroy_dma_pool:
2462 dma_pool_destroy(dev_data->sc_pool);
2463
2464 pm_runtime_put_sync(dev);
2465 pm_runtime_disable(dev);
2466
2467 return ret;
2468}
2469
2470static int sa_ul_remove(struct platform_device *pdev)
2471{
2472 struct sa_crypto_data *dev_data = platform_get_drvdata(pdev);
2473
2474 of_platform_depopulate(&pdev->dev);
2475
2476 sa_unregister_algos(&pdev->dev);
2477
2478 dma_release_channel(dev_data->dma_rx2);
2479 dma_release_channel(dev_data->dma_rx1);
2480 dma_release_channel(dev_data->dma_tx);
2481
2482 dma_pool_destroy(dev_data->sc_pool);
2483
2484 platform_set_drvdata(pdev, NULL);
2485
2486 pm_runtime_put_sync(&pdev->dev);
2487 pm_runtime_disable(&pdev->dev);
2488
2489 return 0;
2490}
2491
2492static struct platform_driver sa_ul_driver = {
2493 .probe = sa_ul_probe,
2494 .remove = sa_ul_remove,
2495 .driver = {
2496 .name = "saul-crypto",
2497 .of_match_table = of_match,
2498 },
2499};
2500module_platform_driver(sa_ul_driver);
2501MODULE_LICENSE("GPL v2");