1// SPDX-License-Identifier: GPL-2.0-only
2/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
3 *
4 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/of.h>
12#include <linux/of_address.h>
13#include <linux/platform_device.h>
14#include <linux/cpumask.h>
15#include <linux/slab.h>
16#include <linux/interrupt.h>
17#include <linux/crypto.h>
18#include <crypto/md5.h>
19#include <crypto/sha1.h>
20#include <crypto/sha2.h>
21#include <crypto/aes.h>
22#include <crypto/internal/des.h>
23#include <linux/mutex.h>
24#include <linux/delay.h>
25#include <linux/sched.h>
26
27#include <crypto/internal/hash.h>
28#include <crypto/internal/skcipher.h>
29#include <crypto/scatterwalk.h>
30#include <crypto/algapi.h>
31
32#include <asm/hypervisor.h>
33#include <asm/mdesc.h>
34
35#include "n2_core.h"
36
37#define DRV_MODULE_NAME "n2_crypto"
38#define DRV_MODULE_VERSION "0.2"
39#define DRV_MODULE_RELDATE "July 28, 2011"
40
41static const char version[] =
42 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
43
44MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
45MODULE_DESCRIPTION("Niagara2 Crypto driver");
46MODULE_LICENSE("GPL");
47MODULE_VERSION(DRV_MODULE_VERSION);
48
49#define N2_CRA_PRIORITY 200
50
51static DEFINE_MUTEX(spu_lock);
52
53struct spu_queue {
54 cpumask_t sharing;
55 unsigned long qhandle;
56
57 spinlock_t lock;
58 u8 q_type;
59 void *q;
60 unsigned long head;
61 unsigned long tail;
62 struct list_head jobs;
63
64 unsigned long devino;
65
66 char irq_name[32];
67 unsigned int irq;
68
69 struct list_head list;
70};
71
72struct spu_qreg {
73 struct spu_queue *queue;
74 unsigned long type;
75};
76
77static struct spu_queue **cpu_to_cwq;
78static struct spu_queue **cpu_to_mau;
79
80static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
81{
82 if (q->q_type == HV_NCS_QTYPE_MAU) {
83 off += MAU_ENTRY_SIZE;
84 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
85 off = 0;
86 } else {
87 off += CWQ_ENTRY_SIZE;
88 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
89 off = 0;
90 }
91 return off;
92}
93
94struct n2_request_common {
95 struct list_head entry;
96 unsigned int offset;
97};
98#define OFFSET_NOT_RUNNING (~(unsigned int)0)
99
100/* An async job request records the final tail value it used in
101 * n2_request_common->offset.  Test whether that offset falls in the
102 * half-open range (old_head, new_head], handling queue wrap-around.
103 */
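/* As a hypothetical illustration of the wrap-around case: with
 * old_head == 0x300 and new_head == 0x100, an offset of 0x380 or 0x040
 * is treated as finished, while 0x200 is still pending.
 */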
104static inline bool job_finished(struct spu_queue *q, unsigned int offset,
105 unsigned long old_head, unsigned long new_head)
106{
107 if (old_head <= new_head) {
108 if (offset > old_head && offset <= new_head)
109 return true;
110 } else {
111 if (offset > old_head || offset <= new_head)
112 return true;
113 }
114 return false;
115}
116
117/* When the HEAD marker is unequal to the actual HEAD, we get
118 * a virtual device INO interrupt. We should process the
119 * completed CWQ entries and adjust the HEAD marker to clear
120 * the IRQ.
121 */
122static irqreturn_t cwq_intr(int irq, void *dev_id)
123{
124 unsigned long off, new_head, hv_ret;
125 struct spu_queue *q = dev_id;
126
127 pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
128 smp_processor_id(), q->qhandle);
129
130 spin_lock(&q->lock);
131
132 hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
133
134 pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
135 smp_processor_id(), new_head, hv_ret);
136
137 for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
138 /* XXX ... XXX */
139 }
140
141 hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
142 if (hv_ret == HV_EOK)
143 q->head = new_head;
144
145 spin_unlock(&q->lock);
146
147 return IRQ_HANDLED;
148}
149
150static irqreturn_t mau_intr(int irq, void *dev_id)
151{
152 struct spu_queue *q = dev_id;
153 unsigned long head, hv_ret;
154
155 spin_lock(&q->lock);
156
157 pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
158 smp_processor_id(), q->qhandle);
159
160 hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
161
162 pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
163 smp_processor_id(), head, hv_ret);
164
165 sun4v_ncs_sethead_marker(q->qhandle, head);
166
167 spin_unlock(&q->lock);
168
169 return IRQ_HANDLED;
170}
171
172static void *spu_queue_next(struct spu_queue *q, void *cur)
173{
174 return q->q + spu_next_offset(q, cur - q->q);
175}
176
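/* Number of free entries left in a CWQ, computed from the cached head and
 * tail offsets.  One slot is always held back so that a completely full
 * queue can be distinguished from an empty one (head == tail means empty).
 */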
177static int spu_queue_num_free(struct spu_queue *q)
178{
179 unsigned long head = q->head;
180 unsigned long tail = q->tail;
181 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
182 unsigned long diff;
183
184 if (head > tail)
185 diff = head - tail;
186 else
187 diff = (end - tail) + head;
188
189 return (diff / CWQ_ENTRY_SIZE) - 1;
190}
191
192static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
193{
194 int avail = spu_queue_num_free(q);
195
196 if (avail >= num_entries)
197 return q->q + q->tail;
198
199 return NULL;
200}
201
202static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
203{
204 unsigned long hv_ret, new_tail;
205
206 new_tail = spu_next_offset(q, last - q->q);
207
208 hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
209 if (hv_ret == HV_EOK)
210 q->tail = new_tail;
211 return hv_ret;
212}
213
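/* Assemble the leading 64-bit control word of a CWQ descriptor.  Length
 * fields are encoded as (value - 1), so a zero hmac_key_len or hash_len
 * leaves its field clear; the opcode, encryption type and authentication
 * type are shifted into place and the SOB/EOB/encrypt/store-final-state
 * flags are OR'd in as requested.
 */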
214static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
215 int enc_type, int auth_type,
216 unsigned int hash_len,
217 bool sfas, bool sob, bool eob, bool encrypt,
218 int opcode)
219{
220 u64 word = (len - 1) & CONTROL_LEN;
221
222 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
223 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
224 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
225 if (sfas)
226 word |= CONTROL_STORE_FINAL_AUTH_STATE;
227 if (sob)
228 word |= CONTROL_START_OF_BLOCK;
229 if (eob)
230 word |= CONTROL_END_OF_BLOCK;
231 if (encrypt)
232 word |= CONTROL_ENCRYPT;
233 if (hmac_key_len)
234 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
235 if (hash_len)
236 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
237
238 return word;
239}
240
241#if 0
242static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
243{
244 if (this_len >= 64 ||
245 qp->head != qp->tail)
246 return true;
247 return false;
248}
249#endif
250
251struct n2_ahash_alg {
252 struct list_head entry;
253 const u8 *hash_zero;
254 const u8 *hash_init;
255 u8 hw_op_hashsz;
256 u8 digest_size;
257 u8 auth_type;
258 u8 hmac_type;
259 struct ahash_alg alg;
260};
261
262static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
263{
264 struct crypto_alg *alg = tfm->__crt_alg;
265 struct ahash_alg *ahash_alg;
266
267 ahash_alg = container_of(alg, struct ahash_alg, halg.base);
268
269 return container_of(ahash_alg, struct n2_ahash_alg, alg);
270}
271
272struct n2_hmac_alg {
273 const char *child_alg;
274 struct n2_ahash_alg derived;
275};
276
277static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
278{
279 struct crypto_alg *alg = tfm->__crt_alg;
280 struct ahash_alg *ahash_alg;
281
282 ahash_alg = container_of(alg, struct ahash_alg, halg.base);
283
284 return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
285}
286
287struct n2_hash_ctx {
288 struct crypto_ahash *fallback_tfm;
289};
290
291#define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
292
293struct n2_hmac_ctx {
294 struct n2_hash_ctx base;
295
296 struct crypto_shash *child_shash;
297
298 int hash_key_len;
299 unsigned char hash_key[N2_HASH_KEY_MAX];
300};
301
302struct n2_hash_req_ctx {
303 union {
304 struct md5_state md5;
305 struct sha1_state sha1;
306 struct sha256_state sha256;
307 } u;
308
309 struct ahash_request fallback_req;
310};
311
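/* This driver only uses the SPU for one-shot digests.  The incremental
 * init/update/final/finup entry points below just forward to the software
 * fallback tfm allocated in n2_hash_cra_init(), and export/import are not
 * implemented.  Only .digest reaches the hardware, and even that path
 * falls back for requests larger than 2^16 bytes.
 */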
312static int n2_hash_async_init(struct ahash_request *req)
313{
314 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
315 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
316 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
317
318 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
319 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
320
321 return crypto_ahash_init(&rctx->fallback_req);
322}
323
324static int n2_hash_async_update(struct ahash_request *req)
325{
326 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
327 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
328 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
329
330 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
331 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
332 rctx->fallback_req.nbytes = req->nbytes;
333 rctx->fallback_req.src = req->src;
334
335 return crypto_ahash_update(&rctx->fallback_req);
336}
337
338static int n2_hash_async_final(struct ahash_request *req)
339{
340 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
341 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
342 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
343
344 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
345 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
346 rctx->fallback_req.result = req->result;
347
348 return crypto_ahash_final(&rctx->fallback_req);
349}
350
351static int n2_hash_async_finup(struct ahash_request *req)
352{
353 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
354 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
355 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
356
357 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
358 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
359 rctx->fallback_req.nbytes = req->nbytes;
360 rctx->fallback_req.src = req->src;
361 rctx->fallback_req.result = req->result;
362
363 return crypto_ahash_finup(&rctx->fallback_req);
364}
365
366static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
367{
368 return -ENOSYS;
369}
370
371static int n2_hash_async_noexport(struct ahash_request *req, void *out)
372{
373 return -ENOSYS;
374}
375
376static int n2_hash_cra_init(struct crypto_tfm *tfm)
377{
378 const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
379 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
380 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
381 struct crypto_ahash *fallback_tfm;
382 int err;
383
384 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
385 CRYPTO_ALG_NEED_FALLBACK);
386 if (IS_ERR(fallback_tfm)) {
387 pr_warn("Fallback driver '%s' could not be loaded!\n",
388 fallback_driver_name);
389 err = PTR_ERR(fallback_tfm);
390 goto out;
391 }
392
393 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
394 crypto_ahash_reqsize(fallback_tfm)));
395
396 ctx->fallback_tfm = fallback_tfm;
397 return 0;
398
399out:
400 return err;
401}
402
403static void n2_hash_cra_exit(struct crypto_tfm *tfm)
404{
405 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
406 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
407
408 crypto_free_ahash(ctx->fallback_tfm);
409}
410
411static int n2_hmac_cra_init(struct crypto_tfm *tfm)
412{
413 const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
414 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
415 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
416 struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
417 struct crypto_ahash *fallback_tfm;
418 struct crypto_shash *child_shash;
419 int err;
420
421 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
422 CRYPTO_ALG_NEED_FALLBACK);
423 if (IS_ERR(fallback_tfm)) {
424 pr_warn("Fallback driver '%s' could not be loaded!\n",
425 fallback_driver_name);
426 err = PTR_ERR(fallback_tfm);
427 goto out;
428 }
429
430 child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
431 if (IS_ERR(child_shash)) {
432 pr_warn("Child shash '%s' could not be loaded!\n",
433 n2alg->child_alg);
434 err = PTR_ERR(child_shash);
435 goto out_free_fallback;
436 }
437
438 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
439 crypto_ahash_reqsize(fallback_tfm)));
440
441 ctx->child_shash = child_shash;
442 ctx->base.fallback_tfm = fallback_tfm;
443 return 0;
444
445out_free_fallback:
446 crypto_free_ahash(fallback_tfm);
447
448out:
449 return err;
450}
451
452static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
453{
454 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
455 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
456
457 crypto_free_ahash(ctx->base.fallback_tfm);
458 crypto_free_shash(ctx->child_shash);
459}
460
461static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
462 unsigned int keylen)
463{
464 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
465 struct crypto_shash *child_shash = ctx->child_shash;
466 struct crypto_ahash *fallback_tfm;
467 int err, bs, ds;
468
469 fallback_tfm = ctx->base.fallback_tfm;
470 err = crypto_ahash_setkey(fallback_tfm, key, keylen);
471 if (err)
472 return err;
473
474 bs = crypto_shash_blocksize(child_shash);
475 ds = crypto_shash_digestsize(child_shash);
476 BUG_ON(ds > N2_HASH_KEY_MAX);
477 if (keylen > bs) {
478 err = crypto_shash_tfm_digest(child_shash, key, keylen,
479 ctx->hash_key);
480 if (err)
481 return err;
482 keylen = ds;
483 } else if (keylen <= N2_HASH_KEY_MAX)
484 memcpy(ctx->hash_key, key, keylen);
485
486 ctx->hash_key_len = keylen;
487
488 return err;
489}
490
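/* Poll the hardware head pointer until it catches up with our cached tail,
 * i.e. until the CWQ has consumed every descriptor submitted so far.
 * Called with the queue lock held.
 */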
491static unsigned long wait_for_tail(struct spu_queue *qp)
492{
493 unsigned long head, hv_ret;
494
495 do {
496 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
497 if (hv_ret != HV_EOK) {
498 pr_err("Hypervisor error on gethead\n");
499 break;
500 }
501 if (head == qp->tail) {
502 qp->head = head;
503 break;
504 }
505 } while (1);
506 return hv_ret;
507}
508
509static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
510 struct cwq_initial_entry *ent)
511{
512 unsigned long hv_ret = spu_queue_submit(qp, ent);
513
514 if (hv_ret == HV_EOK)
515 hv_ret = wait_for_tail(qp);
516
517 return hv_ret;
518}
519
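/* Run a complete digest (or HMAC) through the local CPU's CWQ: the first
 * descriptor carries the control word plus key and IV/state addresses,
 * each further scatterlist element becomes a bare continuation descriptor,
 * and the last one is tagged END_OF_BLOCK before the chain is submitted
 * and waited on synchronously.  Requests above 2^16 bytes are punted to
 * the software fallback.
 */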
520static int n2_do_async_digest(struct ahash_request *req,
521 unsigned int auth_type, unsigned int digest_size,
522 unsigned int result_size, void *hash_loc,
523 unsigned long auth_key, unsigned int auth_key_len)
524{
525 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
526 struct cwq_initial_entry *ent;
527 struct crypto_hash_walk walk;
528 struct spu_queue *qp;
529 unsigned long flags;
530 int err = -ENODEV;
531 int nbytes, cpu;
532
533 /* The total effective length of the operation may not
534 * exceed 2^16.
535 */
536 if (unlikely(req->nbytes > (1 << 16))) {
537 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
538 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
539
540 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
541 rctx->fallback_req.base.flags =
542 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
543 rctx->fallback_req.nbytes = req->nbytes;
544 rctx->fallback_req.src = req->src;
545 rctx->fallback_req.result = req->result;
546
547 return crypto_ahash_digest(&rctx->fallback_req);
548 }
549
550 nbytes = crypto_hash_walk_first(req, &walk);
551
552 cpu = get_cpu();
553 qp = cpu_to_cwq[cpu];
554 if (!qp)
555 goto out;
556
557 spin_lock_irqsave(&qp->lock, flags);
558
559 /* XXX can do better, improve this later by doing a by-hand scatterlist
560 * XXX walk, etc.
561 */
562 ent = qp->q + qp->tail;
563
564 ent->control = control_word_base(nbytes, auth_key_len, 0,
565 auth_type, digest_size,
566 false, true, false, false,
567 OPCODE_INPLACE_BIT |
568 OPCODE_AUTH_MAC);
569 ent->src_addr = __pa(walk.data);
570 ent->auth_key_addr = auth_key;
571 ent->auth_iv_addr = __pa(hash_loc);
572 ent->final_auth_state_addr = 0UL;
573 ent->enc_key_addr = 0UL;
574 ent->enc_iv_addr = 0UL;
575 ent->dest_addr = __pa(hash_loc);
576
577 nbytes = crypto_hash_walk_done(&walk, 0);
578 while (nbytes > 0) {
579 ent = spu_queue_next(qp, ent);
580
581 ent->control = (nbytes - 1);
582 ent->src_addr = __pa(walk.data);
583 ent->auth_key_addr = 0UL;
584 ent->auth_iv_addr = 0UL;
585 ent->final_auth_state_addr = 0UL;
586 ent->enc_key_addr = 0UL;
587 ent->enc_iv_addr = 0UL;
588 ent->dest_addr = 0UL;
589
590 nbytes = crypto_hash_walk_done(&walk, 0);
591 }
592 ent->control |= CONTROL_END_OF_BLOCK;
593
594 if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
595 err = -EINVAL;
596 else
597 err = 0;
598
599 spin_unlock_irqrestore(&qp->lock, flags);
600
601 if (!err)
602 memcpy(req->result, hash_loc, result_size);
603out:
604 put_cpu();
605
606 return err;
607}
608
609static int n2_hash_async_digest(struct ahash_request *req)
610{
611 struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
612 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
613 int ds;
614
615 ds = n2alg->digest_size;
616 if (unlikely(req->nbytes == 0)) {
617 memcpy(req->result, n2alg->hash_zero, ds);
618 return 0;
619 }
620 memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
621
622 return n2_do_async_digest(req, n2alg->auth_type,
623 n2alg->hw_op_hashsz, ds,
624 &rctx->u, 0UL, 0);
625}
626
627static int n2_hmac_async_digest(struct ahash_request *req)
628{
629 struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
630 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
631 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
632 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
633 int ds;
634
635 ds = n2alg->derived.digest_size;
636 if (unlikely(req->nbytes == 0) ||
637 unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
638 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
639 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
640
641 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
642 rctx->fallback_req.base.flags =
643 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
644 rctx->fallback_req.nbytes = req->nbytes;
645 rctx->fallback_req.src = req->src;
646 rctx->fallback_req.result = req->result;
647
648 return crypto_ahash_digest(&rctx->fallback_req);
649 }
650 memcpy(&rctx->u, n2alg->derived.hash_init,
651 n2alg->derived.hw_op_hashsz);
652
653 return n2_do_async_digest(req, n2alg->derived.hmac_type,
654 n2alg->derived.hw_op_hashsz, ds,
655 &rctx->u,
656 __pa(&ctx->hash_key),
657 ctx->hash_key_len);
658}
659
660struct n2_skcipher_context {
661 int key_len;
662 int enc_type;
663 union {
664 u8 aes[AES_MAX_KEY_SIZE];
665 u8 des[DES_KEY_SIZE];
666 u8 des3[3 * DES_KEY_SIZE];
667 } key;
668};
669
670#define N2_CHUNK_ARR_LEN 16
671
672struct n2_crypto_chunk {
673 struct list_head entry;
674 unsigned long iv_paddr : 44;
675 unsigned long arr_len : 20;
676 unsigned long dest_paddr;
677 unsigned long dest_final;
678 struct {
679 unsigned long src_paddr : 44;
680 unsigned long src_len : 20;
681 } arr[N2_CHUNK_ARR_LEN];
682};
683
684struct n2_request_context {
685 struct skcipher_walk walk;
686 struct list_head chunk_list;
687 struct n2_crypto_chunk chunk;
688 u8 temp_iv[16];
689};
690
691/* The SPU allows some level of flexibility for partial cipher blocks
692 * being specified in a descriptor.
693 *
694 * It merely requires that every descriptor's length field is at least
695 * as large as the cipher block size. This means that a cipher block
696 * can span at most 2 descriptors. However, this does not allow a
697 * partial block to span into the final descriptor as that would
698 * violate the rule (since every descriptor's length must be at least
699 * the block size). So, for example, assuming an 8 byte block size:
700 *
701 * 0xe --> 0xa --> 0x8
702 *
703 * is a valid length sequence, whereas:
704 *
705 * 0xe --> 0xb --> 0x7
706 *
707 * is not a valid sequence.
708 */
709
710struct n2_skcipher_alg {
711 struct list_head entry;
712 u8 enc_type;
713 struct skcipher_alg skcipher;
714};
715
716static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
717{
718 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
719
720 return container_of(alg, struct n2_skcipher_alg, skcipher);
721}
722
723struct n2_skcipher_request_context {
724 struct skcipher_walk walk;
725};
726
727static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
728 unsigned int keylen)
729{
730 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
731 struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
732 struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
733
734 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
735
736 switch (keylen) {
737 case AES_KEYSIZE_128:
738 ctx->enc_type |= ENC_TYPE_ALG_AES128;
739 break;
740 case AES_KEYSIZE_192:
741 ctx->enc_type |= ENC_TYPE_ALG_AES192;
742 break;
743 case AES_KEYSIZE_256:
744 ctx->enc_type |= ENC_TYPE_ALG_AES256;
745 break;
746 default:
747 return -EINVAL;
748 }
749
750 ctx->key_len = keylen;
751 memcpy(ctx->key.aes, key, keylen);
752 return 0;
753}
754
755static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
756 unsigned int keylen)
757{
758 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
759 struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
760 struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
761 int err;
762
763 err = verify_skcipher_des_key(skcipher, key);
764 if (err)
765 return err;
766
767 ctx->enc_type = n2alg->enc_type;
768
769 ctx->key_len = keylen;
770 memcpy(ctx->key.des, key, keylen);
771 return 0;
772}
773
774static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
775 unsigned int keylen)
776{
777 struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
778 struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
779 struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
780 int err;
781
782 err = verify_skcipher_des3_key(skcipher, key);
783 if (err)
784 return err;
785
786 ctx->enc_type = n2alg->enc_type;
787
788 ctx->key_len = keylen;
789 memcpy(ctx->key.des3, key, keylen);
790 return 0;
791}
792
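/* Clamp a descriptor length to a whole number of cipher blocks, capped at
 * the 2^16 byte limit of the control word's length field.
 */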
793static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
794{
795 int this_len = nbytes;
796
797 this_len -= (nbytes & (block_size - 1));
798 return this_len > (1 << 16) ? (1 << 16) : this_len;
799}
800
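/* Emit the CWQ descriptors for a single chunk: the first descriptor holds
 * the control word, key and IV, the remaining array entries become bare
 * continuation descriptors, and the last is tagged END_OF_BLOCK.  The
 * chunk is submitted but not waited upon; callers batch the wait.
 */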
801static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
802 struct n2_crypto_chunk *cp,
803 struct spu_queue *qp, bool encrypt)
804{
805 struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
806 struct cwq_initial_entry *ent;
807 bool in_place;
808 int i;
809
810 ent = spu_queue_alloc(qp, cp->arr_len);
811 if (!ent) {
812 pr_info("queue_alloc() of %d fails\n",
813 cp->arr_len);
814 return -EBUSY;
815 }
816
817 in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
818
819 ent->control = control_word_base(cp->arr[0].src_len,
820 0, ctx->enc_type, 0, 0,
821 false, true, false, encrypt,
822 OPCODE_ENCRYPT |
823 (in_place ? OPCODE_INPLACE_BIT : 0));
824 ent->src_addr = cp->arr[0].src_paddr;
825 ent->auth_key_addr = 0UL;
826 ent->auth_iv_addr = 0UL;
827 ent->final_auth_state_addr = 0UL;
828 ent->enc_key_addr = __pa(&ctx->key);
829 ent->enc_iv_addr = cp->iv_paddr;
830 ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
831
832 for (i = 1; i < cp->arr_len; i++) {
833 ent = spu_queue_next(qp, ent);
834
835 ent->control = cp->arr[i].src_len - 1;
836 ent->src_addr = cp->arr[i].src_paddr;
837 ent->auth_key_addr = 0UL;
838 ent->auth_iv_addr = 0UL;
839 ent->final_auth_state_addr = 0UL;
840 ent->enc_key_addr = 0UL;
841 ent->enc_iv_addr = 0UL;
842 ent->dest_addr = 0UL;
843 }
844 ent->control |= CONTROL_END_OF_BLOCK;
845
846 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
847}
848
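/* Walk the request's scatterlists and group the physical segments into
 * n2_crypto_chunk arrays.  A new chunk is started whenever the in-place
 * property changes, the destination stops being physically contiguous,
 * the 16-entry array fills up, or the running length would exceed 2^16
 * bytes.  The first chunk lives in the request context; any further
 * chunks are allocated with GFP_ATOMIC and freed in n2_chunk_complete().
 */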
849static int n2_compute_chunks(struct skcipher_request *req)
850{
851 struct n2_request_context *rctx = skcipher_request_ctx(req);
852 struct skcipher_walk *walk = &rctx->walk;
853 struct n2_crypto_chunk *chunk;
854 unsigned long dest_prev;
855 unsigned int tot_len;
856 bool prev_in_place;
857 int err, nbytes;
858
859 err = skcipher_walk_async(walk, req);
860 if (err)
861 return err;
862
863 INIT_LIST_HEAD(&rctx->chunk_list);
864
865 chunk = &rctx->chunk;
866 INIT_LIST_HEAD(&chunk->entry);
867
868 chunk->iv_paddr = 0UL;
869 chunk->arr_len = 0;
870 chunk->dest_paddr = 0UL;
871
872 prev_in_place = false;
873 dest_prev = ~0UL;
874 tot_len = 0;
875
876 while ((nbytes = walk->nbytes) != 0) {
877 unsigned long dest_paddr, src_paddr;
878 bool in_place;
879 int this_len;
880
881 src_paddr = (page_to_phys(walk->src.phys.page) +
882 walk->src.phys.offset);
883 dest_paddr = (page_to_phys(walk->dst.phys.page) +
884 walk->dst.phys.offset);
885 in_place = (src_paddr == dest_paddr);
886 this_len = skcipher_descriptor_len(nbytes, walk->blocksize);
887
888 if (chunk->arr_len != 0) {
889 if (in_place != prev_in_place ||
890 (!prev_in_place &&
891 dest_paddr != dest_prev) ||
892 chunk->arr_len == N2_CHUNK_ARR_LEN ||
893 tot_len + this_len > (1 << 16)) {
894 chunk->dest_final = dest_prev;
895 list_add_tail(&chunk->entry,
896 &rctx->chunk_list);
897 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
898 if (!chunk) {
899 err = -ENOMEM;
900 break;
901 }
902 INIT_LIST_HEAD(&chunk->entry);
903 }
904 }
905 if (chunk->arr_len == 0) {
906 chunk->dest_paddr = dest_paddr;
907 tot_len = 0;
908 }
909 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
910 chunk->arr[chunk->arr_len].src_len = this_len;
911 chunk->arr_len++;
912
913 dest_prev = dest_paddr + this_len;
914 prev_in_place = in_place;
915 tot_len += this_len;
916
917 err = skcipher_walk_done(walk, nbytes - this_len);
918 if (err)
919 break;
920 }
921 if (!err && chunk->arr_len != 0) {
922 chunk->dest_final = dest_prev;
923 list_add_tail(&chunk->entry, &rctx->chunk_list);
924 }
925
926 return err;
927}
928
929static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
930{
931 struct n2_request_context *rctx = skcipher_request_ctx(req);
932 struct n2_crypto_chunk *c, *tmp;
933
934 if (final_iv)
935 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
936
937 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
938 list_del(&c->entry);
939 if (unlikely(c != &rctx->chunk))
940 kfree(c);
941 }
942
943}
944
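/* ECB needs no IV chaining, so every chunk is handed to __n2_crypt_chunk()
 * in order on the local CPU's CWQ and the whole batch is waited on once at
 * the end.
 */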
945static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
946{
947 struct n2_request_context *rctx = skcipher_request_ctx(req);
948 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
949 int err = n2_compute_chunks(req);
950 struct n2_crypto_chunk *c, *tmp;
951 unsigned long flags, hv_ret;
952 struct spu_queue *qp;
953
954 if (err)
955 return err;
956
957 qp = cpu_to_cwq[get_cpu()];
958 err = -ENODEV;
959 if (!qp)
960 goto out;
961
962 spin_lock_irqsave(&qp->lock, flags);
963
964 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
965 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
966 if (err)
967 break;
968 list_del(&c->entry);
969 if (unlikely(c != &rctx->chunk))
970 kfree(c);
971 }
972 if (!err) {
973 hv_ret = wait_for_tail(qp);
974 if (hv_ret != HV_EOK)
975 err = -EINVAL;
976 }
977
978 spin_unlock_irqrestore(&qp->lock, flags);
979
980out:
981 put_cpu();
982
983 n2_chunk_complete(req, NULL);
984 return err;
985}
986
987static int n2_encrypt_ecb(struct skcipher_request *req)
988{
989 return n2_do_ecb(req, true);
990}
991
992static int n2_decrypt_ecb(struct skcipher_request *req)
993{
994 return n2_do_ecb(req, false);
995}
996
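/* Chaining modes need the IV threaded between chunks.  For encryption the
 * chunks are walked forward, each one's IV pointing at the final output
 * block of the previous chunk.  For decryption the list is walked in
 * reverse so the required ciphertext blocks are still intact, and the very
 * last ciphertext block is copied into temp_iv before an in-place
 * operation can overwrite it.
 */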
997static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
998{
999 struct n2_request_context *rctx = skcipher_request_ctx(req);
1000 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
1001 unsigned long flags, hv_ret, iv_paddr;
1002 int err = n2_compute_chunks(req);
1003 struct n2_crypto_chunk *c, *tmp;
1004 struct spu_queue *qp;
1005 void *final_iv_addr;
1006
1007 final_iv_addr = NULL;
1008
1009 if (err)
1010 return err;
1011
1012 qp = cpu_to_cwq[get_cpu()];
1013 err = -ENODEV;
1014 if (!qp)
1015 goto out;
1016
1017 spin_lock_irqsave(&qp->lock, flags);
1018
1019 if (encrypt) {
1020 iv_paddr = __pa(rctx->walk.iv);
1021 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1022 entry) {
1023 c->iv_paddr = iv_paddr;
1024 err = __n2_crypt_chunk(tfm, c, qp, true);
1025 if (err)
1026 break;
1027 iv_paddr = c->dest_final - rctx->walk.blocksize;
1028 list_del(&c->entry);
1029 if (unlikely(c != &rctx->chunk))
1030 kfree(c);
1031 }
1032 final_iv_addr = __va(iv_paddr);
1033 } else {
1034 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1035 entry) {
1036 if (c == &rctx->chunk) {
1037 iv_paddr = __pa(rctx->walk.iv);
1038 } else {
1039 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1040 tmp->arr[tmp->arr_len-1].src_len -
1041 rctx->walk.blocksize);
1042 }
1043 if (!final_iv_addr) {
1044 unsigned long pa;
1045
1046 pa = (c->arr[c->arr_len-1].src_paddr +
1047 c->arr[c->arr_len-1].src_len -
1048 rctx->walk.blocksize);
1049 final_iv_addr = rctx->temp_iv;
1050 memcpy(rctx->temp_iv, __va(pa),
1051 rctx->walk.blocksize);
1052 }
1053 c->iv_paddr = iv_paddr;
1054 err = __n2_crypt_chunk(tfm, c, qp, false);
1055 if (err)
1056 break;
1057 list_del(&c->entry);
1058 if (unlikely(c != &rctx->chunk))
1059 kfree(c);
1060 }
1061 }
1062 if (!err) {
1063 hv_ret = wait_for_tail(qp);
1064 if (hv_ret != HV_EOK)
1065 err = -EINVAL;
1066 }
1067
1068 spin_unlock_irqrestore(&qp->lock, flags);
1069
1070out:
1071 put_cpu();
1072
1073 n2_chunk_complete(req, err ? NULL : final_iv_addr);
1074 return err;
1075}
1076
1077static int n2_encrypt_chaining(struct skcipher_request *req)
1078{
1079 return n2_do_chaining(req, true);
1080}
1081
1082static int n2_decrypt_chaining(struct skcipher_request *req)
1083{
1084 return n2_do_chaining(req, false);
1085}
1086
1087struct n2_skcipher_tmpl {
1088 const char *name;
1089 const char *drv_name;
1090 u8 block_size;
1091 u8 enc_type;
1092 struct skcipher_alg skcipher;
1093};
1094
1095static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
1096	/* DES: ECB and CBC are supported */
1097 { .name = "ecb(des)",
1098 .drv_name = "ecb-des",
1099 .block_size = DES_BLOCK_SIZE,
1100 .enc_type = (ENC_TYPE_ALG_DES |
1101 ENC_TYPE_CHAINING_ECB),
1102 .skcipher = {
1103 .min_keysize = DES_KEY_SIZE,
1104 .max_keysize = DES_KEY_SIZE,
1105 .setkey = n2_des_setkey,
1106 .encrypt = n2_encrypt_ecb,
1107 .decrypt = n2_decrypt_ecb,
1108 },
1109 },
1110 { .name = "cbc(des)",
1111 .drv_name = "cbc-des",
1112 .block_size = DES_BLOCK_SIZE,
1113 .enc_type = (ENC_TYPE_ALG_DES |
1114 ENC_TYPE_CHAINING_CBC),
1115 .skcipher = {
1116 .ivsize = DES_BLOCK_SIZE,
1117 .min_keysize = DES_KEY_SIZE,
1118 .max_keysize = DES_KEY_SIZE,
1119 .setkey = n2_des_setkey,
1120 .encrypt = n2_encrypt_chaining,
1121 .decrypt = n2_decrypt_chaining,
1122 },
1123 },
1124
1125	/* 3DES: ECB and CBC are supported */
1126 { .name = "ecb(des3_ede)",
1127 .drv_name = "ecb-3des",
1128 .block_size = DES_BLOCK_SIZE,
1129 .enc_type = (ENC_TYPE_ALG_3DES |
1130 ENC_TYPE_CHAINING_ECB),
1131 .skcipher = {
1132 .min_keysize = 3 * DES_KEY_SIZE,
1133 .max_keysize = 3 * DES_KEY_SIZE,
1134 .setkey = n2_3des_setkey,
1135 .encrypt = n2_encrypt_ecb,
1136 .decrypt = n2_decrypt_ecb,
1137 },
1138 },
1139 { .name = "cbc(des3_ede)",
1140 .drv_name = "cbc-3des",
1141 .block_size = DES_BLOCK_SIZE,
1142 .enc_type = (ENC_TYPE_ALG_3DES |
1143 ENC_TYPE_CHAINING_CBC),
1144 .skcipher = {
1145 .ivsize = DES_BLOCK_SIZE,
1146 .min_keysize = 3 * DES_KEY_SIZE,
1147 .max_keysize = 3 * DES_KEY_SIZE,
1148 .setkey = n2_3des_setkey,
1149 .encrypt = n2_encrypt_chaining,
1150 .decrypt = n2_decrypt_chaining,
1151 },
1152 },
1153
1154	/* AES: ECB, CBC and CTR are supported */
1155 { .name = "ecb(aes)",
1156 .drv_name = "ecb-aes",
1157 .block_size = AES_BLOCK_SIZE,
1158 .enc_type = (ENC_TYPE_ALG_AES128 |
1159 ENC_TYPE_CHAINING_ECB),
1160 .skcipher = {
1161 .min_keysize = AES_MIN_KEY_SIZE,
1162 .max_keysize = AES_MAX_KEY_SIZE,
1163 .setkey = n2_aes_setkey,
1164 .encrypt = n2_encrypt_ecb,
1165 .decrypt = n2_decrypt_ecb,
1166 },
1167 },
1168 { .name = "cbc(aes)",
1169 .drv_name = "cbc-aes",
1170 .block_size = AES_BLOCK_SIZE,
1171 .enc_type = (ENC_TYPE_ALG_AES128 |
1172 ENC_TYPE_CHAINING_CBC),
1173 .skcipher = {
1174 .ivsize = AES_BLOCK_SIZE,
1175 .min_keysize = AES_MIN_KEY_SIZE,
1176 .max_keysize = AES_MAX_KEY_SIZE,
1177 .setkey = n2_aes_setkey,
1178 .encrypt = n2_encrypt_chaining,
1179 .decrypt = n2_decrypt_chaining,
1180 },
1181 },
1182 { .name = "ctr(aes)",
1183 .drv_name = "ctr-aes",
1184 .block_size = AES_BLOCK_SIZE,
1185 .enc_type = (ENC_TYPE_ALG_AES128 |
1186 ENC_TYPE_CHAINING_COUNTER),
1187 .skcipher = {
1188 .ivsize = AES_BLOCK_SIZE,
1189 .min_keysize = AES_MIN_KEY_SIZE,
1190 .max_keysize = AES_MAX_KEY_SIZE,
1191 .setkey = n2_aes_setkey,
1192 .encrypt = n2_encrypt_chaining,
1193 .decrypt = n2_encrypt_chaining,
1194 },
1195 },
1196
1197};
1198#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)
1199
1200static LIST_HEAD(skcipher_algs);
1201
1202struct n2_hash_tmpl {
1203 const char *name;
1204 const u8 *hash_zero;
1205 const u8 *hash_init;
1206 u8 hw_op_hashsz;
1207 u8 digest_size;
1208 u8 statesize;
1209 u8 block_size;
1210 u8 auth_type;
1211 u8 hmac_type;
1212};
1213
1214static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
1215 cpu_to_le32(MD5_H0),
1216 cpu_to_le32(MD5_H1),
1217 cpu_to_le32(MD5_H2),
1218 cpu_to_le32(MD5_H3),
1219};
1220static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
1221 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1222};
1223static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
1224 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1225 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1226};
1227static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
1228 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1229 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1230};
1231
1232static const struct n2_hash_tmpl hash_tmpls[] = {
1233 { .name = "md5",
1234 .hash_zero = md5_zero_message_hash,
1235 .hash_init = (u8 *)n2_md5_init,
1236 .auth_type = AUTH_TYPE_MD5,
1237 .hmac_type = AUTH_TYPE_HMAC_MD5,
1238 .hw_op_hashsz = MD5_DIGEST_SIZE,
1239 .digest_size = MD5_DIGEST_SIZE,
1240 .statesize = sizeof(struct md5_state),
1241 .block_size = MD5_HMAC_BLOCK_SIZE },
1242 { .name = "sha1",
1243 .hash_zero = sha1_zero_message_hash,
1244 .hash_init = (u8 *)n2_sha1_init,
1245 .auth_type = AUTH_TYPE_SHA1,
1246 .hmac_type = AUTH_TYPE_HMAC_SHA1,
1247 .hw_op_hashsz = SHA1_DIGEST_SIZE,
1248 .digest_size = SHA1_DIGEST_SIZE,
1249 .statesize = sizeof(struct sha1_state),
1250 .block_size = SHA1_BLOCK_SIZE },
1251 { .name = "sha256",
1252 .hash_zero = sha256_zero_message_hash,
1253 .hash_init = (u8 *)n2_sha256_init,
1254 .auth_type = AUTH_TYPE_SHA256,
1255 .hmac_type = AUTH_TYPE_HMAC_SHA256,
1256 .hw_op_hashsz = SHA256_DIGEST_SIZE,
1257 .digest_size = SHA256_DIGEST_SIZE,
1258 .statesize = sizeof(struct sha256_state),
1259 .block_size = SHA256_BLOCK_SIZE },
1260 { .name = "sha224",
1261 .hash_zero = sha224_zero_message_hash,
1262 .hash_init = (u8 *)n2_sha224_init,
1263 .auth_type = AUTH_TYPE_SHA256,
1264 .hmac_type = AUTH_TYPE_RESERVED,
1265 .hw_op_hashsz = SHA256_DIGEST_SIZE,
1266 .digest_size = SHA224_DIGEST_SIZE,
1267 .statesize = sizeof(struct sha256_state),
1268 .block_size = SHA224_BLOCK_SIZE },
1269};
1270#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1271
1272static LIST_HEAD(ahash_algs);
1273static LIST_HEAD(hmac_algs);
1274
1275static int algs_registered;
1276
1277static void __n2_unregister_algs(void)
1278{
1279 struct n2_skcipher_alg *skcipher, *skcipher_tmp;
1280 struct n2_ahash_alg *alg, *alg_tmp;
1281 struct n2_hmac_alg *hmac, *hmac_tmp;
1282
1283 list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
1284 crypto_unregister_skcipher(&skcipher->skcipher);
1285 list_del(&skcipher->entry);
1286 kfree(skcipher);
1287 }
1288 list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1289 crypto_unregister_ahash(&hmac->derived.alg);
1290 list_del(&hmac->derived.entry);
1291 kfree(hmac);
1292 }
1293 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1294 crypto_unregister_ahash(&alg->alg);
1295 list_del(&alg->entry);
1296 kfree(alg);
1297 }
1298}
1299
1300static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
1301{
1302 crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
1303 return 0;
1304}
1305
1306static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
1307{
1308 struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1309 struct skcipher_alg *alg;
1310 int err;
1311
1312 if (!p)
1313 return -ENOMEM;
1314
1315 alg = &p->skcipher;
1316 *alg = tmpl->skcipher;
1317
1318 snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1319 snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1320 alg->base.cra_priority = N2_CRA_PRIORITY;
1321 alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
1322 CRYPTO_ALG_ALLOCATES_MEMORY;
1323 alg->base.cra_blocksize = tmpl->block_size;
1324 p->enc_type = tmpl->enc_type;
1325 alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
1326 alg->base.cra_module = THIS_MODULE;
1327 alg->init = n2_skcipher_init_tfm;
1328
1329 list_add(&p->entry, &skcipher_algs);
1330 err = crypto_register_skcipher(alg);
1331 if (err) {
1332 pr_err("%s alg registration failed\n", alg->base.cra_name);
1333 list_del(&p->entry);
1334 kfree(p);
1335 } else {
1336 pr_info("%s alg registered\n", alg->base.cra_name);
1337 }
1338 return err;
1339}
1340
1341static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1342{
1343 struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1344 struct ahash_alg *ahash;
1345 struct crypto_alg *base;
1346 int err;
1347
1348 if (!p)
1349 return -ENOMEM;
1350
1351 p->child_alg = n2ahash->alg.halg.base.cra_name;
1352 memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1353 INIT_LIST_HEAD(&p->derived.entry);
1354
1355 ahash = &p->derived.alg;
1356 ahash->digest = n2_hmac_async_digest;
1357 ahash->setkey = n2_hmac_async_setkey;
1358
1359 base = &ahash->halg.base;
1360 if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1361 p->child_alg) >= CRYPTO_MAX_ALG_NAME)
1362 goto out_free_p;
1363 if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
1364 p->child_alg) >= CRYPTO_MAX_ALG_NAME)
1365 goto out_free_p;
1366
1367 base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1368 base->cra_init = n2_hmac_cra_init;
1369 base->cra_exit = n2_hmac_cra_exit;
1370
1371 list_add(&p->derived.entry, &hmac_algs);
1372 err = crypto_register_ahash(ahash);
1373 if (err) {
1374 pr_err("%s alg registration failed\n", base->cra_name);
1375 list_del(&p->derived.entry);
1376out_free_p:
1377 kfree(p);
1378 } else {
1379 pr_info("%s alg registered\n", base->cra_name);
1380 }
1381 return err;
1382}
1383
1384static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1385{
1386 struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1387 struct hash_alg_common *halg;
1388 struct crypto_alg *base;
1389 struct ahash_alg *ahash;
1390 int err;
1391
1392 if (!p)
1393 return -ENOMEM;
1394
1395 p->hash_zero = tmpl->hash_zero;
1396 p->hash_init = tmpl->hash_init;
1397 p->auth_type = tmpl->auth_type;
1398 p->hmac_type = tmpl->hmac_type;
1399 p->hw_op_hashsz = tmpl->hw_op_hashsz;
1400 p->digest_size = tmpl->digest_size;
1401
1402 ahash = &p->alg;
1403 ahash->init = n2_hash_async_init;
1404 ahash->update = n2_hash_async_update;
1405 ahash->final = n2_hash_async_final;
1406 ahash->finup = n2_hash_async_finup;
1407 ahash->digest = n2_hash_async_digest;
1408 ahash->export = n2_hash_async_noexport;
1409 ahash->import = n2_hash_async_noimport;
1410
1411 halg = &ahash->halg;
1412 halg->digestsize = tmpl->digest_size;
1413 halg->statesize = tmpl->statesize;
1414
1415 base = &halg->base;
1416 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1417 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1418 base->cra_priority = N2_CRA_PRIORITY;
1419 base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1420 CRYPTO_ALG_NEED_FALLBACK;
1421 base->cra_blocksize = tmpl->block_size;
1422 base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1423 base->cra_module = THIS_MODULE;
1424 base->cra_init = n2_hash_cra_init;
1425 base->cra_exit = n2_hash_cra_exit;
1426
1427 list_add(&p->entry, &ahash_algs);
1428 err = crypto_register_ahash(ahash);
1429 if (err) {
1430 pr_err("%s alg registration failed\n", base->cra_name);
1431 list_del(&p->entry);
1432 kfree(p);
1433 } else {
1434 pr_info("%s alg registered\n", base->cra_name);
1435 }
1436 if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1437 err = __n2_register_one_hmac(p);
1438 return err;
1439}
1440
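/* Algorithm registration is reference counted: multiple CWQ devices may
 * probe, but the algorithms are registered only on the first call and torn
 * down again when the last user unregisters.
 */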
1441static int n2_register_algs(void)
1442{
1443 int i, err = 0;
1444
1445 mutex_lock(&spu_lock);
1446 if (algs_registered++)
1447 goto out;
1448
1449 for (i = 0; i < NUM_HASH_TMPLS; i++) {
1450 err = __n2_register_one_ahash(&hash_tmpls[i]);
1451 if (err) {
1452 __n2_unregister_algs();
1453 goto out;
1454 }
1455 }
1456 for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1457 err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
1458 if (err) {
1459 __n2_unregister_algs();
1460 goto out;
1461 }
1462 }
1463
1464out:
1465 mutex_unlock(&spu_lock);
1466 return err;
1467}
1468
1469static void n2_unregister_algs(void)
1470{
1471 mutex_lock(&spu_lock);
1472 if (!--algs_registered)
1473 __n2_unregister_algs();
1474 mutex_unlock(&spu_lock);
1475}
1476
1477/* To map CWQ queues to interrupt sources, the hypervisor API provides
1478 * a devino. This isn't very useful to us because all of the
1479 * interrupts listed in the device_node have been translated to
1480 * Linux virtual IRQ cookie numbers.
1481 *
1482 * So we have to back-translate, going through the 'intr' and 'ino'
1483 * property tables of the n2cp MDESC node, matching it with the OF
1484 * 'interrupts' property entries, in order to figure out which
1485 * devino goes to which already-translated IRQ.
1486 */
1487static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1488 unsigned long dev_ino)
1489{
1490 const unsigned int *dev_intrs;
1491 unsigned int intr;
1492 int i;
1493
1494 for (i = 0; i < ip->num_intrs; i++) {
1495 if (ip->ino_table[i].ino == dev_ino)
1496 break;
1497 }
1498 if (i == ip->num_intrs)
1499 return -ENODEV;
1500
1501 intr = ip->ino_table[i].intr;
1502
1503 dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1504 if (!dev_intrs)
1505 return -ENODEV;
1506
1507 for (i = 0; i < dev->archdata.num_irqs; i++) {
1508 if (dev_intrs[i] == intr)
1509 return i;
1510 }
1511
1512 return -ENODEV;
1513}
1514
1515static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1516 const char *irq_name, struct spu_queue *p,
1517 irq_handler_t handler)
1518{
1519 unsigned long herr;
1520 int index;
1521
1522 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1523 if (herr)
1524 return -EINVAL;
1525
1526 index = find_devino_index(dev, ip, p->devino);
1527 if (index < 0)
1528 return index;
1529
1530 p->irq = dev->archdata.irqs[index];
1531
1532 sprintf(p->irq_name, "%s-%d", irq_name, index);
1533
1534 return request_irq(p->irq, handler, 0, p->irq_name, p);
1535}
1536
1537static struct kmem_cache *queue_cache[2];
1538
1539static void *new_queue(unsigned long q_type)
1540{
1541 return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1542}
1543
1544static void free_queue(void *p, unsigned long q_type)
1545{
1546 kmem_cache_free(queue_cache[q_type - 1], p);
1547}
1548
1549static int queue_cache_init(void)
1550{
1551 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1552 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1553 kmem_cache_create("mau_queue",
1554 (MAU_NUM_ENTRIES *
1555 MAU_ENTRY_SIZE),
1556 MAU_ENTRY_SIZE, 0, NULL);
1557 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1558 return -ENOMEM;
1559
1560 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1561 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1562 kmem_cache_create("cwq_queue",
1563 (CWQ_NUM_ENTRIES *
1564 CWQ_ENTRY_SIZE),
1565 CWQ_ENTRY_SIZE, 0, NULL);
1566 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1567 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1568 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1569 return -ENOMEM;
1570 }
1571 return 0;
1572}
1573
1574static void queue_cache_destroy(void)
1575{
1576 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1577 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1578 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1579 queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1580}
1581
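/* Queue configuration is done via work_on_cpu_safe() so that the
 * sun4v_ncs_qconf() hypercall runs on a CPU that actually shares the
 * queue being registered.
 */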
1582static long spu_queue_register_workfn(void *arg)
1583{
1584 struct spu_qreg *qr = arg;
1585 struct spu_queue *p = qr->queue;
1586 unsigned long q_type = qr->type;
1587 unsigned long hv_ret;
1588
1589 hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1590 CWQ_NUM_ENTRIES, &p->qhandle);
1591 if (!hv_ret)
1592 sun4v_ncs_sethead_marker(p->qhandle, 0);
1593
1594 return hv_ret ? -EINVAL : 0;
1595}
1596
1597static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1598{
1599 int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
1600 struct spu_qreg qr = { .queue = p, .type = q_type };
1601
1602 return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
1603}
1604
1605static int spu_queue_setup(struct spu_queue *p)
1606{
1607 int err;
1608
1609 p->q = new_queue(p->q_type);
1610 if (!p->q)
1611 return -ENOMEM;
1612
1613 err = spu_queue_register(p, p->q_type);
1614 if (err) {
1615 free_queue(p->q, p->q_type);
1616 p->q = NULL;
1617 }
1618
1619 return err;
1620}
1621
1622static void spu_queue_destroy(struct spu_queue *p)
1623{
1624 unsigned long hv_ret;
1625
1626 if (!p->q)
1627 return;
1628
1629 hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1630
1631 if (!hv_ret)
1632 free_queue(p->q, p->q_type);
1633}
1634
1635static void spu_list_destroy(struct list_head *list)
1636{
1637 struct spu_queue *p, *n;
1638
1639 list_for_each_entry_safe(p, n, list, list) {
1640 int i;
1641
1642 for (i = 0; i < NR_CPUS; i++) {
1643 if (cpu_to_cwq[i] == p)
1644 cpu_to_cwq[i] = NULL;
1645 }
1646
1647 if (p->irq) {
1648 free_irq(p->irq, p);
1649 p->irq = 0;
1650 }
1651 spu_queue_destroy(p);
1652 list_del(&p->list);
1653 kfree(p);
1654 }
1655}
1656
1657/* Walk the backward arcs of a CWQ 'exec-unit' node,
1658 * gathering cpu membership information.
1659 */
1660static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1661 struct platform_device *dev,
1662 u64 node, struct spu_queue *p,
1663 struct spu_queue **table)
1664{
1665 u64 arc;
1666
1667 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1668 u64 tgt = mdesc_arc_target(mdesc, arc);
1669 const char *name = mdesc_node_name(mdesc, tgt);
1670 const u64 *id;
1671
1672 if (strcmp(name, "cpu"))
1673 continue;
1674 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1675 if (table[*id] != NULL) {
1676 dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
1677 dev->dev.of_node);
1678 return -EINVAL;
1679 }
1680 cpumask_set_cpu(*id, &p->sharing);
1681 table[*id] = p;
1682 }
1683 return 0;
1684}
1685
1686/* Process an 'exec-unit' MDESC node of type 'cwq'. */
1687static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1688 struct platform_device *dev, struct mdesc_handle *mdesc,
1689 u64 node, const char *iname, unsigned long q_type,
1690 irq_handler_t handler, struct spu_queue **table)
1691{
1692 struct spu_queue *p;
1693 int err;
1694
1695 p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1696 if (!p) {
1697 dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
1698 dev->dev.of_node);
1699 return -ENOMEM;
1700 }
1701
1702 cpumask_clear(&p->sharing);
1703 spin_lock_init(&p->lock);
1704 p->q_type = q_type;
1705 INIT_LIST_HEAD(&p->jobs);
1706 list_add(&p->list, list);
1707
1708 err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1709 if (err)
1710 return err;
1711
1712 err = spu_queue_setup(p);
1713 if (err)
1714 return err;
1715
1716 return spu_map_ino(dev, ip, iname, p, handler);
1717}
1718
1719static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1720 struct spu_mdesc_info *ip, struct list_head *list,
1721 const char *exec_name, unsigned long q_type,
1722 irq_handler_t handler, struct spu_queue **table)
1723{
1724 int err = 0;
1725 u64 node;
1726
1727 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1728 const char *type;
1729
1730 type = mdesc_get_property(mdesc, node, "type", NULL);
1731 if (!type || strcmp(type, exec_name))
1732 continue;
1733
1734 err = handle_exec_unit(ip, list, dev, mdesc, node,
1735 exec_name, q_type, handler, table);
1736 if (err) {
1737 spu_list_destroy(list);
1738 break;
1739 }
1740 }
1741
1742 return err;
1743}
1744
1745static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1746 struct spu_mdesc_info *ip)
1747{
1748 const u64 *ino;
1749 int ino_len;
1750 int i;
1751
1752 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1753 if (!ino) {
1754 printk("NO 'ino'\n");
1755 return -ENODEV;
1756 }
1757
1758 ip->num_intrs = ino_len / sizeof(u64);
1759 ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1760 ip->num_intrs),
1761 GFP_KERNEL);
1762 if (!ip->ino_table)
1763 return -ENOMEM;
1764
1765 for (i = 0; i < ip->num_intrs; i++) {
1766 struct ino_blob *b = &ip->ino_table[i];
1767 b->intr = i + 1;
1768 b->ino = ino[i];
1769 }
1770
1771 return 0;
1772}
1773
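/* Find the MDESC "virtual-device" node whose name matches node_name and
 * whose cfg-handle matches this device's 'reg' property, record the
 * cfg-handle, and extract its 'ino' interrupt table.
 */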
1774static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1775 struct platform_device *dev,
1776 struct spu_mdesc_info *ip,
1777 const char *node_name)
1778{
1779 u64 node, reg;
1780
1781	if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
1782 return -ENODEV;
1783
1784 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1785 const char *name;
1786 const u64 *chdl;
1787
1788 name = mdesc_get_property(mdesc, node, "name", NULL);
1789 if (!name || strcmp(name, node_name))
1790 continue;
1791 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1792 if (!chdl || (*chdl != reg))
1793 continue;
1794 ip->cfg_handle = *chdl;
1795 return get_irq_props(mdesc, node, ip);
1796 }
1797
1798 return -ENODEV;
1799}
1800
1801static unsigned long n2_spu_hvapi_major;
1802static unsigned long n2_spu_hvapi_minor;
1803
1804static int n2_spu_hvapi_register(void)
1805{
1806 int err;
1807
1808 n2_spu_hvapi_major = 2;
1809 n2_spu_hvapi_minor = 0;
1810
1811 err = sun4v_hvapi_register(HV_GRP_NCS,
1812 n2_spu_hvapi_major,
1813 &n2_spu_hvapi_minor);
1814
1815 if (!err)
1816 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1817 n2_spu_hvapi_major,
1818 n2_spu_hvapi_minor);
1819
1820 return err;
1821}
1822
1823static void n2_spu_hvapi_unregister(void)
1824{
1825 sun4v_hvapi_unregister(HV_GRP_NCS);
1826}
1827
1828static int global_ref;
1829
1830static int grab_global_resources(void)
1831{
1832 int err = 0;
1833
1834 mutex_lock(&spu_lock);
1835
1836 if (global_ref++)
1837 goto out;
1838
1839 err = n2_spu_hvapi_register();
1840 if (err)
1841 goto out;
1842
1843 err = queue_cache_init();
1844 if (err)
1845 goto out_hvapi_release;
1846
1847 err = -ENOMEM;
1848 cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1849 GFP_KERNEL);
1850 if (!cpu_to_cwq)
1851 goto out_queue_cache_destroy;
1852
1853 cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1854 GFP_KERNEL);
1855 if (!cpu_to_mau)
1856 goto out_free_cwq_table;
1857
1858 err = 0;
1859
1860out:
1861 if (err)
1862 global_ref--;
1863 mutex_unlock(&spu_lock);
1864 return err;
1865
1866out_free_cwq_table:
1867 kfree(cpu_to_cwq);
1868 cpu_to_cwq = NULL;
1869
1870out_queue_cache_destroy:
1871 queue_cache_destroy();
1872
1873out_hvapi_release:
1874 n2_spu_hvapi_unregister();
1875 goto out;
1876}
1877
1878static void release_global_resources(void)
1879{
1880 mutex_lock(&spu_lock);
1881 if (!--global_ref) {
1882 kfree(cpu_to_cwq);
1883 cpu_to_cwq = NULL;
1884
1885 kfree(cpu_to_mau);
1886 cpu_to_mau = NULL;
1887
1888 queue_cache_destroy();
1889 n2_spu_hvapi_unregister();
1890 }
1891 mutex_unlock(&spu_lock);
1892}
1893
1894static struct n2_crypto *alloc_n2cp(void)
1895{
1896 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1897
1898 if (np)
1899 INIT_LIST_HEAD(&np->cwq_list);
1900
1901 return np;
1902}
1903
1904static void free_n2cp(struct n2_crypto *np)
1905{
1906 kfree(np->cwq_info.ino_table);
1907 np->cwq_info.ino_table = NULL;
1908
1909 kfree(np);
1910}
1911
1912static void n2_spu_driver_version(void)
1913{
1914 static int n2_spu_version_printed;
1915
1916 if (n2_spu_version_printed++ == 0)
1917 pr_info("%s", version);
1918}
1919
1920static int n2_crypto_probe(struct platform_device *dev)
1921{
1922 struct mdesc_handle *mdesc;
1923 struct n2_crypto *np;
1924 int err;
1925
1926 n2_spu_driver_version();
1927
1928 pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
1929
1930 np = alloc_n2cp();
1931 if (!np) {
1932 dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
1933 dev->dev.of_node);
1934 return -ENOMEM;
1935 }
1936
1937 err = grab_global_resources();
1938 if (err) {
1939 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
1940 dev->dev.of_node);
1941 goto out_free_n2cp;
1942 }
1943
1944 mdesc = mdesc_grab();
1945
1946 if (!mdesc) {
1947 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
1948 dev->dev.of_node);
1949 err = -ENODEV;
1950 goto out_free_global;
1951 }
1952 err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
1953 if (err) {
1954 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
1955 dev->dev.of_node);
1956 mdesc_release(mdesc);
1957 goto out_free_global;
1958 }
1959
1960 err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
1961 "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
1962 cpu_to_cwq);
1963 mdesc_release(mdesc);
1964
1965 if (err) {
1966 dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
1967 dev->dev.of_node);
1968 goto out_free_global;
1969 }
1970
1971 err = n2_register_algs();
1972 if (err) {
1973 dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
1974 dev->dev.of_node);
1975 goto out_free_spu_list;
1976 }
1977
1978 dev_set_drvdata(&dev->dev, np);
1979
1980 return 0;
1981
1982out_free_spu_list:
1983 spu_list_destroy(&np->cwq_list);
1984
1985out_free_global:
1986 release_global_resources();
1987
1988out_free_n2cp:
1989 free_n2cp(np);
1990
1991 return err;
1992}
1993
1994static void n2_crypto_remove(struct platform_device *dev)
1995{
1996 struct n2_crypto *np = dev_get_drvdata(&dev->dev);
1997
1998 n2_unregister_algs();
1999
2000 spu_list_destroy(&np->cwq_list);
2001
2002 release_global_resources();
2003
2004 free_n2cp(np);
2005}
2006
2007static struct n2_mau *alloc_ncp(void)
2008{
2009 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2010
2011 if (mp)
2012 INIT_LIST_HEAD(&mp->mau_list);
2013
2014 return mp;
2015}
2016
2017static void free_ncp(struct n2_mau *mp)
2018{
2019 kfree(mp->mau_info.ino_table);
2020 mp->mau_info.ino_table = NULL;
2021
2022 kfree(mp);
2023}
2024
2025static int n2_mau_probe(struct platform_device *dev)
2026{
2027 struct mdesc_handle *mdesc;
2028 struct n2_mau *mp;
2029 int err;
2030
2031 n2_spu_driver_version();
2032
2033 pr_info("Found NCP at %pOF\n", dev->dev.of_node);
2034
2035 mp = alloc_ncp();
2036 if (!mp) {
2037 dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
2038 dev->dev.of_node);
2039 return -ENOMEM;
2040 }
2041
2042 err = grab_global_resources();
2043 if (err) {
2044 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2045 dev->dev.of_node);
2046 goto out_free_ncp;
2047 }
2048
2049 mdesc = mdesc_grab();
2050
2051 if (!mdesc) {
2052 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2053 dev->dev.of_node);
2054 err = -ENODEV;
2055 goto out_free_global;
2056 }
2057
2058 err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2059 if (err) {
2060 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2061 dev->dev.of_node);
2062 mdesc_release(mdesc);
2063 goto out_free_global;
2064 }
2065
2066 err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2067 "mau", HV_NCS_QTYPE_MAU, mau_intr,
2068 cpu_to_mau);
2069 mdesc_release(mdesc);
2070
2071 if (err) {
2072 dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
2073 dev->dev.of_node);
2074 goto out_free_global;
2075 }
2076
2077 dev_set_drvdata(&dev->dev, mp);
2078
2079 return 0;
2080
2081out_free_global:
2082 release_global_resources();
2083
2084out_free_ncp:
2085 free_ncp(mp);
2086
2087 return err;
2088}
2089
2090static void n2_mau_remove(struct platform_device *dev)
2091{
2092 struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2093
2094 spu_list_destroy(&mp->mau_list);
2095
2096 release_global_resources();
2097
2098 free_ncp(mp);
2099}
2100
static const struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name = "n2cp",
		.of_match_table = n2_crypto_match,
	},
	.probe = n2_crypto_probe,
	.remove_new = n2_crypto_remove,
};

static const struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
	.driver = {
		.name = "ncp",
		.of_match_table = n2_mau_match,
	},
	.probe = n2_mau_probe,
	.remove_new = n2_mau_remove,
};

static struct platform_driver * const drivers[] = {
	&n2_crypto_driver,
	&n2_mau_driver,
};

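/* Both platform drivers are registered together; each probe takes its own
 * reference on the shared SPU state via grab_global_resources(), so either
 * device type can come and go independently.
 */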
static int __init n2_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit n2_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(n2_init);
module_exit(n2_exit);