/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2016 Broadcom
 */

#ifndef _CIPHER_H
#define _CIPHER_H

#include <linux/atomic.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/mailbox_client.h>
#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/arc4.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/sha3.h>

#include "spu.h"
#include "spum.h"
#include "spu2.h"

/* Driver supports up to MAX_SPUS SPU blocks */
#define MAX_SPUS 16

#define ARC4_STATE_SIZE 4

#define CCM_AES_IV_SIZE 16
#define CCM_ESP_IV_SIZE 8
#define RFC4543_ICV_SIZE 16

#define MAX_KEY_SIZE ARC4_MAX_KEY_SIZE
#define MAX_IV_SIZE AES_BLOCK_SIZE
#define MAX_DIGEST_SIZE SHA3_512_DIGEST_SIZE
#define MAX_ASSOC_SIZE 512

/* size of salt value for AES-GCM-ESP and AES-CCM-ESP */
#define GCM_ESP_SALT_SIZE 4
#define CCM_ESP_SALT_SIZE 3
#define MAX_SALT_SIZE GCM_ESP_SALT_SIZE
#define GCM_ESP_SALT_OFFSET 0
#define CCM_ESP_SALT_OFFSET 1
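
/* For the illustrative sketches added throughout this header */
#include <linux/errno.h>
#include <linux/string.h>

/*
 * Illustrative sketch, not part of the driver: for AES-GCM-ESP
 * (rfc4106-style keys), the last GCM_ESP_SALT_SIZE bytes of the key
 * material carry the salt rather than AES key bytes. A setkey path might
 * split them roughly as follows; the helper name is hypothetical.
 */
static inline int example_split_gcm_esp_salt(const u8 *key,
					     unsigned int keylen,
					     u8 *salt,
					     unsigned int *enckeylen)
{
	if (keylen < GCM_ESP_SALT_SIZE)
		return -EINVAL;

	/* salt is the tail of the key material; the rest is the AES key */
	*enckeylen = keylen - GCM_ESP_SALT_SIZE;
	memcpy(salt, key + *enckeylen, GCM_ESP_SALT_SIZE);
	return 0;
}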

#define GCM_ESP_DIGESTSIZE 16

#define MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE

/*
 * Maximum number of bytes from a non-final hash request that can be deferred
 * until more data is available. With the new crypto API framework, this
 * can be no more than one block of data.
 */
#define HASH_CARRY_MAX MAX_HASH_BLOCK_SIZE
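
/*
 * Illustrative sketch, not part of the driver: a non-final update of `len`
 * new bytes, with `carry` bytes held over from earlier requests, may only
 * submit whole blocks; the remainder (always less than one block, so it
 * fits in HASH_CARRY_MAX) is carried into the next request. The helper
 * name is hypothetical.
 */
static inline unsigned int example_new_carry_len(unsigned int carry,
						 unsigned int len,
						 unsigned int blocksize)
{
	return (carry + len) % blocksize;
}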

/* Force at least 4-byte alignment of all SPU message fields */
#define SPU_MSG_ALIGN 4

/* Number of times to resend mailbox message if mb queue is full */
#define SPU_MB_RETRY_MAX 1000
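
/*
 * Illustrative sketch, not part of the driver: resubmit a mailbox message
 * up to SPU_MB_RETRY_MAX times, assuming a full queue is reported as
 * -ENOBUFS. The helper name is hypothetical; the real retry policy lives
 * in the driver's .c code.
 */
static inline int example_mb_send_retry(struct mbox_chan *chan, void *mssg)
{
	int err = -ENOBUFS;
	int retry;

	for (retry = 0; retry < SPU_MB_RETRY_MAX && err == -ENOBUFS; retry++)
		err = mbox_send_message(chan, mssg);

	return err;
}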

/* op_counts[] indexes */
enum op_type {
	SPU_OP_CIPHER,
	SPU_OP_HASH,
	SPU_OP_HMAC,
	SPU_OP_AEAD,
	SPU_OP_NUM
};

enum spu_spu_type {
	SPU_TYPE_SPUM,
	SPU_TYPE_SPU2,
};

/*
 * SPUM_NS2 and SPUM_NSP are the SPU-M block on Northstar 2 and Northstar Plus,
 * respectively.
 */
enum spu_spu_subtype {
	SPU_SUBTYPE_SPUM_NS2,
	SPU_SUBTYPE_SPUM_NSP,
	SPU_SUBTYPE_SPU2_V1,
	SPU_SUBTYPE_SPU2_V2
};

struct spu_type_subtype {
	enum spu_spu_type type;
	enum spu_spu_subtype subtype;
};

struct cipher_op {
	enum spu_cipher_alg alg;
	enum spu_cipher_mode mode;
};

struct auth_op {
	enum hash_alg alg;
	enum hash_mode mode;
};

struct iproc_alg_s {
	u32 type;
	union {
		struct skcipher_alg skcipher;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	struct cipher_op cipher_info;
	struct auth_op auth_info;
	bool auth_first;
	bool registered;
};

/*
 * Buffers for a SPU request/reply message pair. All part of one structure to
 * allow a single alloc per request.
 */
struct spu_msg_buf {
	/* Request message fragments */

	/*
	 * SPU request message header. For SPU-M, holds MH, EMH, SCTX, BDESC,
	 * and BD header. For SPU2, holds FMD, OMD.
	 */
	u8 bcm_spu_req_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];

	/*
	 * IV or counter. Sized to include the salt. Also used for the
	 * XTS tweak.
	 */
	u8 iv_ctr[ALIGN(2 * AES_BLOCK_SIZE, SPU_MSG_ALIGN)];

	/* Hash digest, used for both the request and the response */
	u8 digest[ALIGN(MAX_DIGEST_SIZE, SPU_MSG_ALIGN)];

	/* SPU request message padding */
	u8 spu_req_pad[ALIGN(SPU_PAD_LEN_MAX, SPU_MSG_ALIGN)];

	/* SPU-M request message STATUS field */
	u8 tx_stat[ALIGN(SPU_TX_STATUS_LEN, SPU_MSG_ALIGN)];

	/* Response message fragments */

	/* SPU response message header */
	u8 spu_resp_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];

	/* SPU response message STATUS field padding */
	u8 rx_stat_pad[ALIGN(SPU_STAT_PAD_MAX, SPU_MSG_ALIGN)];

	/* SPU response message STATUS field */
	u8 rx_stat[ALIGN(SPU_RX_STATUS_LEN, SPU_MSG_ALIGN)];

	union {
		/* Buffers only used for skcipher */
		struct {
			/*
			 * Field used for either SUPDT when RC4 is used
			 * -OR- tweak value when XTS/AES is used
			 */
			u8 supdt_tweak[ALIGN(SPU_SUPDT_LEN, SPU_MSG_ALIGN)];
		} c;

		/* Buffers only used for aead */
		struct {
			/* SPU response pad for GCM data */
			u8 gcmpad[ALIGN(AES_BLOCK_SIZE, SPU_MSG_ALIGN)];

			/* SPU request msg padding for GCM AAD */
			u8 req_aad_pad[ALIGN(SPU_PAD_LEN_MAX, SPU_MSG_ALIGN)];

			/* SPU response data to be discarded */
			u8 resp_aad[ALIGN(MAX_ASSOC_SIZE + MAX_IV_SIZE,
					  SPU_MSG_ALIGN)];
		} a;
	};
};
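
/* For the scatterlist calls in the sketch below */
#include <linux/scatterlist.h>

/*
 * Illustrative sketch, not part of the driver: because every request and
 * response fragment lives inside one struct spu_msg_buf, transmit
 * scatterlist entries can point straight into it, with no allocation per
 * fragment. The helper and its two-entry layout are hypothetical.
 */
static inline void example_fill_tx_frags(struct scatterlist *sg,
					 struct spu_msg_buf *mb,
					 unsigned int hdr_len)
{
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], mb->bcm_spu_req_hdr, hdr_len);
	sg_set_buf(&sg[1], mb->tx_stat, SPU_TX_STATUS_LEN);
}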

struct iproc_ctx_s {
	u8 enckey[MAX_KEY_SIZE + ARC4_STATE_SIZE];
	unsigned int enckeylen;

	u8 authkey[MAX_KEY_SIZE + ARC4_STATE_SIZE];
	unsigned int authkeylen;

	u8 salt[MAX_SALT_SIZE];
	unsigned int salt_len;
	unsigned int salt_offset;
	u8 iv[MAX_IV_SIZE];

	unsigned int digestsize;

	struct iproc_alg_s *alg;
	bool is_esp;

	struct cipher_op cipher;
	enum spu_cipher_type cipher_type;

	struct auth_op auth;
	bool auth_first;

	/*
	 * The maximum length in bytes of the payload in a SPU message for this
	 * context. For SPU-M, the payload is the combination of AAD and data.
	 * For SPU2, the payload is just data. A value of SPU_MAX_PAYLOAD_INF
	 * indicates that there is no limit to the length of the SPU message
	 * payload.
	 */
	unsigned int max_payload;

	struct crypto_aead *fallback_cipher;

	/* auth_type is determined during processing of request */

	u8 ipad[MAX_HASH_BLOCK_SIZE];
	u8 opad[MAX_HASH_BLOCK_SIZE];

	/*
	 * Buffer to hold SPU message header template. Template is created at
	 * setkey time for skcipher requests, since most of the fields in the
	 * header are known at that time. At request time, just fill in a few
	 * missing pieces related to length of data in the request and IVs, etc.
	 */
	u8 bcm_spu_req_hdr[ALIGN(SPU2_HEADER_ALLOC_LEN, SPU_MSG_ALIGN)];

	/* Length of SPU request header */
	u16 spu_req_hdr_len;

	/* Expected length of SPU response header */
	u16 spu_resp_hdr_len;

	/*
	 * shash descriptor - needed to perform incremental hashing in
	 * software, when hw doesn't support it.
	 */
	struct shash_desc *shash;

	bool is_rfc4543;	/* RFC 4543 style of GMAC */
};
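
/*
 * Illustrative sketch, not part of the driver: clamp the next chunk of a
 * request to the context's max_payload, where SPU_MAX_PAYLOAD_INF (from
 * spu.h) means "no limit". The helper name is hypothetical.
 */
static inline unsigned int example_chunk_size(const struct iproc_ctx_s *ctx,
					      unsigned int remaining)
{
	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		return remaining;

	return remaining < ctx->max_payload ? remaining : ctx->max_payload;
}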

/* state from iproc_reqctx_s necessary for hash state export/import */
struct spu_hash_export_s {
	unsigned int total_todo;
	unsigned int total_sent;
	u8 hash_carry[HASH_CARRY_MAX];
	unsigned int hash_carry_len;
	u8 incr_hash[MAX_DIGEST_SIZE];
	bool is_sw_hmac;
};

struct iproc_reqctx_s {
	/* general context */
	struct crypto_async_request *parent;

	/* only valid after enqueue() */
	struct iproc_ctx_s *ctx;

	u8 chan_idx;	/* Mailbox channel to be used to submit this request */

	/* total todo, rx'd, and sent for this request */
	unsigned int total_todo;
	unsigned int total_received;	/* only valid for skcipher */
	unsigned int total_sent;

	/*
	 * num bytes sent to hw from the src sg in this request. This can differ
	 * from total_sent for incremental hashing. total_sent includes previous
	 * init() and update() data. src_sent does not.
	 */
	unsigned int src_sent;

	/*
	 * For AEAD requests, start of associated data. This will typically
	 * point to the beginning of the src scatterlist from the request,
	 * since assoc data is at the beginning of the src scatterlist rather
	 * than in its own sg.
	 */
	struct scatterlist *assoc;

	/*
	 * scatterlist entry and offset to start of data for next chunk. Crypto
	 * API src scatterlist for AEAD starts with AAD, if present. For first
	 * chunk, src_sg is sg entry at beginning of input data (after AAD).
	 * src_skip begins at the offset in that sg entry where data begins.
	 */
	struct scatterlist *src_sg;
	int src_nents;	/* Number of src entries with data */
	u32 src_skip;	/* bytes of current sg entry already used */

	/*
	 * Same for destination. For AEAD, if there is AAD, output data must
	 * be written at offset following AAD.
	 */
	struct scatterlist *dst_sg;
	int dst_nents;	/* Number of dst entries with data */
	u32 dst_skip;	/* bytes of current sg entry already written */

	/* Mailbox message used to send this request to PDC driver */
	struct brcm_message mb_mssg;

	bool bd_suppress;	/* suppress BD field in SPU response? */

	/* cipher context */
	bool is_encrypt;

	/*
	 * CBC mode: IV. CTR mode: counter. Else empty. Used as a DMA
	 * buffer for AEAD requests, so it must be allocated as DMAable
	 * memory. If the IV is concatenated with the salt, it includes
	 * the salt.
	 */
	u8 *iv_ctr;
	/* Length of IV or counter, in bytes */
	unsigned int iv_ctr_len;

	/*
	 * Hash requests can be of any size, whether initial, update, or final.
	 * A non-final request must be submitted to the SPU as an integral
	 * number of blocks. This may leave data at the end of the request
	 * that is not a full block. Since the request is non-final, it cannot
	 * be padded. So, we write the remainder to this hash_carry buffer and
	 * hold it until the next request arrives. The carry data is then
	 * submitted at the beginning of the data in the next SPU msg.
	 * hash_carry_len is the number of bytes currently in hash_carry. These
	 * fields are only used for ahash requests.
	 */
	u8 hash_carry[HASH_CARRY_MAX];
	unsigned int hash_carry_len;
	unsigned int is_final;	/* is this the final for the hash op? */

	/*
	 * Digest from incremental hash is saved here to include in next hash
	 * operation. Cannot be stored in req->result for truncated hashes,
	 * since result may be sized for final digest. Cannot be saved in
	 * msg_buf because that gets deleted between incremental hash ops
	 * and is not saved as part of export().
	 */
	u8 incr_hash[MAX_DIGEST_SIZE];

	/* hmac context */
	bool is_sw_hmac;

	gfp_t gfp;

	/* Buffers used to build SPU request and response messages */
	struct spu_msg_buf msg_buf;

	struct aead_request req;
};
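
/*
 * Illustrative sketch, not part of the driver: an ahash .export() for this
 * driver would copy exactly the spu_hash_export_s fields out of the request
 * context, so a later .import() can resume the hash. The helper is
 * hypothetical; the real handlers live in the .c code.
 */
static inline void example_hash_export(const struct iproc_reqctx_s *rctx,
				       struct spu_hash_export_s *state)
{
	state->total_todo = rctx->total_todo;
	state->total_sent = rctx->total_sent;
	memcpy(state->hash_carry, rctx->hash_carry, rctx->hash_carry_len);
	state->hash_carry_len = rctx->hash_carry_len;
	memcpy(state->incr_hash, rctx->incr_hash, MAX_DIGEST_SIZE);
	state->is_sw_hmac = rctx->is_sw_hmac;
}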

/*
 * This structure encapsulates a set of function pointers specific to the
 * type of SPU hardware running. These functions handle creation and parsing
 * of SPU request messages and SPU response messages. It also includes
 * hardware-specific values read from the device tree.
 */
struct spu_hw {
	void (*spu_dump_msg_hdr)(u8 *buf, unsigned int buf_len);
	u32 (*spu_ctx_max_payload)(enum spu_cipher_alg cipher_alg,
				   enum spu_cipher_mode cipher_mode,
				   unsigned int blocksize);
	u32 (*spu_payload_length)(u8 *spu_hdr);
	u16 (*spu_response_hdr_len)(u16 auth_key_len, u16 enc_key_len,
				    bool is_hash);
	u16 (*spu_hash_pad_len)(enum hash_alg hash_alg,
				enum hash_mode hash_mode, u32 chunksize,
				u16 hash_block_size);
	u32 (*spu_gcm_ccm_pad_len)(enum spu_cipher_mode cipher_mode,
				   unsigned int data_size);
	u32 (*spu_assoc_resp_len)(enum spu_cipher_mode cipher_mode,
				  unsigned int assoc_len,
				  unsigned int iv_len, bool is_encrypt);
	u8 (*spu_aead_ivlen)(enum spu_cipher_mode cipher_mode,
			     u16 iv_len);
	enum hash_type (*spu_hash_type)(u32 src_sent);
	u32 (*spu_digest_size)(u32 digest_size, enum hash_alg alg,
			       enum hash_type);
	u32 (*spu_create_request)(u8 *spu_hdr,
				  struct spu_request_opts *req_opts,
				  struct spu_cipher_parms *cipher_parms,
				  struct spu_hash_parms *hash_parms,
				  struct spu_aead_parms *aead_parms,
				  unsigned int data_size);
	u16 (*spu_cipher_req_init)(u8 *spu_hdr,
				   struct spu_cipher_parms *cipher_parms);
	void (*spu_cipher_req_finish)(u8 *spu_hdr,
				      u16 spu_req_hdr_len,
				      unsigned int is_inbound,
				      struct spu_cipher_parms *cipher_parms,
				      unsigned int data_size);
	void (*spu_request_pad)(u8 *pad_start, u32 gcm_padding,
				u32 hash_pad_len, enum hash_alg auth_alg,
				enum hash_mode auth_mode,
				unsigned int total_sent, u32 status_padding);
	u8 (*spu_xts_tweak_in_payload)(void);
	u8 (*spu_tx_status_len)(void);
	u8 (*spu_rx_status_len)(void);
	int (*spu_status_process)(u8 *statp);
	void (*spu_ccm_update_iv)(unsigned int digestsize,
				  struct spu_cipher_parms *cipher_parms,
				  unsigned int assoclen, unsigned int chunksize,
				  bool is_encrypt, bool is_esp);
	u32 (*spu_wordalign_padlen)(u32 data_size);

	/* The base virtual address of the SPU hw registers */
	void __iomem *reg_vbase[MAX_SPUS];

	/* Version of the SPU hardware */
	enum spu_spu_type spu_type;

	/* Sub-version of the SPU hardware */
	enum spu_spu_subtype spu_subtype;

	/* The number of SPUs on this platform */
	u32 num_spu;

	/* The number of SPU channels on this platform */
	u32 num_chan;
};
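
/*
 * Illustrative sketch, not part of the driver: callers never test spu_type
 * at each call site; they dispatch through the function pointers, which
 * probe code fills in once with either the SPU-M or SPU2 implementations.
 * The helper name is hypothetical.
 */
static inline u32 example_resp_overhead(const struct spu_hw *spu,
					u16 auth_key_len, u16 enc_key_len,
					bool is_hash)
{
	/* same two calls work for SPU-M and SPU2 */
	return spu->spu_response_hdr_len(auth_key_len, enc_key_len, is_hash) +
	       spu->spu_rx_status_len();
}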

struct bcm_device_private {
	struct platform_device *pdev;

	struct spu_hw spu;

	atomic_t session_count;	/* number of streams active */
	atomic_t stream_count;	/* monotonic counter for stream IDs */

	/* Length of BCM header. Set to 0 when hw does not expect BCM HEADER. */
	u8 bcm_hdr_len;

	/* The index of the channel to use for the next crypto request */
	atomic_t next_chan;

	struct dentry *debugfs_dir;
	struct dentry *debugfs_stats;

	/* Number of request bytes processed and result bytes returned */
	atomic64_t bytes_in;
	atomic64_t bytes_out;

	/* Number of operations of each type */
	atomic_t op_counts[SPU_OP_NUM];

	atomic_t cipher_cnt[CIPHER_ALG_LAST][CIPHER_MODE_LAST];
	atomic_t hash_cnt[HASH_ALG_LAST];
	atomic_t hmac_cnt[HASH_ALG_LAST];
	atomic_t aead_cnt[AEAD_TYPE_LAST];

	/* Number of calls to setkey() for each operation type */
	atomic_t setkey_cnt[SPU_OP_NUM];

	/* Number of times a request was resubmitted because the mailbox was full */
	atomic_t mb_no_spc;

	/* Number of mailbox send failures */
	atomic_t mb_send_fail;

	/* Number of ICV check failures for AEAD messages */
	atomic_t bad_icv;

	struct mbox_client mcl;

	/* Array of mailbox channel pointers, one for each channel */
	struct mbox_chan **mbox;
};
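
/*
 * Illustrative sketch, not part of the driver: round-robin selection of
 * the mailbox channel for the next request, using the atomic next_chan
 * counter. The helper name is hypothetical.
 */
static inline u8 example_select_chan(struct bcm_device_private *priv)
{
	u32 idx = (u32)atomic_inc_return(&priv->next_chan) %
		  priv->spu.num_chan;

	return (u8)idx;
}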

extern struct bcm_device_private iproc_priv;

#endif