/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

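/* Command submission model: the operation descriptor words are written to
 * CMD_REQ1 through CMD_REQx first, then the job is kicked off by writing
 * CMD_REQ0, which carries the queue id, the job id and the stop/interrupt
 * on-complete flags.  When an interrupt on completion is requested, the
 * caller sleeps on the queue's wait queue until the ISR reports completion
 * or an error.
 */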
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
	struct ccp_cmd_queue *cmd_q = op->cmd_q;
	struct ccp_device *ccp = cmd_q->ccp;
	void __iomem *cr_addr;
	u32 cr0, cmd;
	unsigned int i;
	int ret = 0;

	/* We could read a status register to see how many free slots
	 * are actually available, but reading that register resets it
	 * and you could lose some error information.
	 */
	cmd_q->free_slots--;

	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
	      | (op->jobid << REQ0_JOBID_SHIFT)
	      | REQ0_WAIT_FOR_WRITE;

	if (op->soc)
		cr0 |= REQ0_STOP_ON_COMPLETE
		       | REQ0_INT_ON_COMPLETE;

	if (op->ioc || !cmd_q->free_slots)
		cr0 |= REQ0_INT_ON_COMPLETE;

	/* Start at CMD_REQ1 */
	cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

	mutex_lock(&ccp->req_mutex);

	/* Write CMD_REQ1 through CMD_REQx first */
	for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
		iowrite32(*(cr + i), cr_addr);

	/* Tell the CCP to start */
	wmb();
	iowrite32(cr0, ccp->io_regs + CMD_REQ0);

	mutex_unlock(&ccp->req_mutex);

	if (cr0 & REQ0_INT_ON_COMPLETE) {
		/* Wait for the job to complete */
		ret = wait_event_interruptible(cmd_q->int_queue,
					       cmd_q->int_rcvd);
		if (ret || cmd_q->cmd_error) {
			/* On error delete all related jobs from the queue */
			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

			if (!ret)
				ret = -EIO;
		} else if (op->soc) {
			/* Delete just head job from the queue on SoC */
			cmd = DEL_Q_ACTIVE
			      | (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
		}

		cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);

		cmd_q->int_rcvd = 0;
	}

	return ret;
}

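/* The ccp_perform_*() helpers below translate a struct ccp_op into the six
 * CMD_REQ1..CMD_REQ6 register values for the corresponding engine and hand
 * the descriptor to ccp_do_cmd().  For example, an AES operation packs the
 * engine, key KSB index, type, mode and action into REQ1, the source length
 * into REQ2, the source address (plus context KSB index and memory type)
 * into REQ3/REQ4, and the destination address into REQ5/REQ6.
 */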
static int ccp_perform_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->u.aes.mode == CCP_AES_MODE_CFB)
		cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_xts_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_sha(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
		| (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
		| REQ1_INIT;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);

	if (op->eom) {
		cr[0] |= REQ1_EOM;
		cr[4] = lower_32_bits(op->u.sha.msg_bits);
		cr[5] = upper_32_bits(op->u.sha.msg_bits);
	} else {
		cr[4] = 0;
		cr[5] = 0;
	}

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_rsa(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
		| (op->ksb_key << REQ1_KEY_KSB_SHIFT)
		| REQ1_EOM;
	cr[1] = op->u.rsa.input_len - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_passthru(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
		| (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
		| (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

	if (op->src.type == CCP_MEMTYPE_SYSTEM)
		cr[1] = op->src.u.dma.length - 1;
	else
		cr[1] = op->dst.u.dma.length - 1;

	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
		cr[2] = ccp_addr_lo(&op->src.u.dma);
		cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->src.u.dma);

		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
	} else {
		cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
		cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
	}

	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
		cr[4] = ccp_addr_lo(&op->dst.u.dma);
		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->dst.u.dma);
	} else {
		cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
		cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
	}

	if (op->eom)
		cr[0] |= REQ1_EOM;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_ecc(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = REQ1_ECC_AFFINE_CONVERT
		| (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
		| (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
		| REQ1_EOM;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

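/* hwrng read callback.  A single 32-bit word is read from the TRNG output
 * register; a value of zero means no entropy was available, and repeated
 * zero reads beyond TRNG_RETRIES are treated as a hardware error.
 */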
static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/*
	 * Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

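/* One-time device initialization: discover the available command queues from
 * Q_MASK_REG, give each queue a DMA pool, two reserved KSB regions and a
 * dedicated kthread, hook up the IRQ handler, register the TRNG with the
 * hwrng framework and finally unmask the per-queue interrupts.
 */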
static int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
			 ccp->name, i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
		ccp->ksb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the queues used to wait for KSB space and suspend */
	init_waitqueue_head(&ccp->ksb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	/* Register the RNG */
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret) {
		dev_err(dev, "error registering hwrng (%d)\n", ret);
		goto e_kthread;
	}

	ccp_add_device(ccp);

	/* Enable interrupts */
	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

	return 0;

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}

static void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int qim, i;

	/* Remove this device from the list of available units first */
	ccp_del_device(ccp);

	/* Unregister the RNG */
	hwrng_unregister(&ccp->hwrng);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	/* Build queue interrupt mask (two interrupt masks per queue) */
	qim = 0;
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		qim |= cmd_q->int_ok | cmd_q->int_err;
	}

	/* Disable and clear interrupts */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	ccp->free_irq(ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queue */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}

static irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	return IRQ_HANDLED;
}

static struct ccp_actions ccp3_actions = {
	.perform_aes = ccp_perform_aes,
	.perform_xts_aes = ccp_perform_xts_aes,
	.perform_sha = ccp_perform_sha,
	.perform_rsa = ccp_perform_rsa,
	.perform_passthru = ccp_perform_passthru,
	.perform_ecc = ccp_perform_ecc,
	.init = ccp_init,
	.destroy = ccp_destroy,
	.irqhandler = ccp_irq_handler,
};

struct ccp_vdata ccpv3 = {
	.version = CCP_VERSION(3, 0),
	.perform = &ccp3_actions,
};
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

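/* Allocate 'count' contiguous slots from the device's key storage block
 * bitmap.  If no run of free slots is available, the caller sleeps on
 * sb_queue until ccp_free_ksb() releases space.  Returns the first slot
 * index biased by KSB_START, or 0 if the wait was interrupted.
 */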
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
	int start;
	struct ccp_device *ccp = cmd_q->ccp;

	for (;;) {
		mutex_lock(&ccp->sb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->sb,
							ccp->sb_count,
							ccp->sb_start,
							count, 0);
		if (start <= ccp->sb_count) {
			bitmap_set(ccp->sb, start, count);

			mutex_unlock(&ccp->sb_mutex);
			break;
		}

		ccp->sb_avail = 0;

		mutex_unlock(&ccp->sb_mutex);

		/* Wait for KSB entries to become available */
		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
			return 0;
	}

	return KSB_START + start;
}

static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
			 unsigned int count)
{
	struct ccp_device *ccp = cmd_q->ccp;

	if (!start)
		return;

	mutex_lock(&ccp->sb_mutex);

	bitmap_clear(ccp->sb, start - KSB_START, count);

	ccp->sb_avail = 1;

	mutex_unlock(&ccp->sb_mutex);

	wake_up_interruptible_all(&ccp->sb_queue);
}

static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
	return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
}

static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
	struct ccp_cmd_queue *cmd_q = op->cmd_q;
	struct ccp_device *ccp = cmd_q->ccp;
	void __iomem *cr_addr;
	u32 cr0, cmd;
	unsigned int i;
	int ret = 0;

	/* We could read a status register to see how many free slots
	 * are actually available, but reading that register resets it
	 * and you could lose some error information.
	 */
	cmd_q->free_slots--;

	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
	      | (op->jobid << REQ0_JOBID_SHIFT)
	      | REQ0_WAIT_FOR_WRITE;

	if (op->soc)
		cr0 |= REQ0_STOP_ON_COMPLETE
		       | REQ0_INT_ON_COMPLETE;

	if (op->ioc || !cmd_q->free_slots)
		cr0 |= REQ0_INT_ON_COMPLETE;

	/* Start at CMD_REQ1 */
	cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

	mutex_lock(&ccp->req_mutex);

	/* Write CMD_REQ1 through CMD_REQx first */
	for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
		iowrite32(*(cr + i), cr_addr);

	/* Tell the CCP to start */
	wmb();
	iowrite32(cr0, ccp->io_regs + CMD_REQ0);

	mutex_unlock(&ccp->req_mutex);

	if (cr0 & REQ0_INT_ON_COMPLETE) {
		/* Wait for the job to complete */
		ret = wait_event_interruptible(cmd_q->int_queue,
					       cmd_q->int_rcvd);
		if (ret || cmd_q->cmd_error) {
			/* On error delete all related jobs from the queue */
			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;
			if (cmd_q->cmd_error)
				ccp_log_error(cmd_q->ccp,
					      cmd_q->cmd_error);

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

			if (!ret)
				ret = -EIO;
		} else if (op->soc) {
			/* Delete just head job from the queue on SoC */
			cmd = DEL_Q_ACTIVE
			      | (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
		}

		cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);

		cmd_q->int_rcvd = 0;
	}

	return ret;
}

static int ccp_perform_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->u.aes.mode == CCP_AES_MODE_CFB)
		cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_xts_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_sha(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
		| (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
		| REQ1_INIT;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);

	if (op->eom) {
		cr[0] |= REQ1_EOM;
		cr[4] = lower_32_bits(op->u.sha.msg_bits);
		cr[5] = upper_32_bits(op->u.sha.msg_bits);
	} else {
		cr[4] = 0;
		cr[5] = 0;
	}

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_rsa(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT)
		| REQ1_EOM;
	cr[1] = op->u.rsa.input_len - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_passthru(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
		| (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
		| (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

	if (op->src.type == CCP_MEMTYPE_SYSTEM)
		cr[1] = op->src.u.dma.length - 1;
	else
		cr[1] = op->dst.u.dma.length - 1;

	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
		cr[2] = ccp_addr_lo(&op->src.u.dma);
		cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->src.u.dma);

		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
	} else {
		cr[2] = op->src.u.sb * CCP_SB_BYTES;
		cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
	}

	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
		cr[4] = ccp_addr_lo(&op->dst.u.dma);
		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->dst.u.dma);
	} else {
		cr[4] = op->dst.u.sb * CCP_SB_BYTES;
		cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
	}

	if (op->eom)
		cr[0] |= REQ1_EOM;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_ecc(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = REQ1_ECC_AFFINE_CONVERT
		| (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
		| (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
		| REQ1_EOM;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

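/* Interrupt handling is split in two: the hard IRQ handler masks the queue
 * interrupts and either runs the bottom half directly or schedules it as a
 * tasklet when ccp->use_tasklet is set.  The bottom half records per-queue
 * status, wakes the waiting queue kthread and then re-enables the queue
 * interrupts.
 */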
static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
{
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
}

static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
{
	iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
}

static void ccp_irq_bh(unsigned long data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}
	ccp_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;

	ccp_disable_queue_interrupts(ccp);
	if (ccp->use_tasklet)
		tasklet_schedule(&ccp->irq_tasklet);
	else
		ccp_irq_bh((unsigned long)ccp);

	return IRQ_HANDLED;
}

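/* Device initialization: the per-queue interrupt mask is accumulated in
 * ccp->qim, the IRQ is requested through the sp_request_ccp_irq() helper of
 * the ccp->sp parent device, an optional tasklet is set up to run the
 * interrupt bottom half, and the hwrng and DMA engine interfaces are
 * registered once the queues are running.
 */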
static int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, i;
	int ret;

	/* Find available queues */
	ccp->qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
			 ccp->name, i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->sb_key = KSB_START + ccp->sb_start++;
		cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
		ccp->sb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = ccp_get_free_slots(cmd_q);

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		ccp->qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	ccp_disable_queue_interrupts(ccp);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the ISR tasklet, if used */
	if (ccp->use_tasklet)
		tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
			     (unsigned long)ccp);

	dev_dbg(dev, "Starting threads...\n");
	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	dev_dbg(dev, "Enabling interrupts...\n");
	/* Enable interrupts */
	ccp_enable_queue_interrupts(ccp);

	dev_dbg(dev, "Registering device...\n");
	ccp_add_device(ccp);

	ret = ccp_register_rng(ccp);
	if (ret)
		goto e_kthread;

	/* Register the DMA engine support */
	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;

	return 0;

e_hwrng:
	ccp_unregister_rng(ccp);

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}

static void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int i;

	/* Unregister the DMA engine */
	ccp_dmaengine_unregister(ccp);

	/* Unregister the RNG */
	ccp_unregister_rng(ccp);

	/* Remove this device from the list of available units */
	ccp_del_device(ccp);

	/* Disable and clear interrupts */
	ccp_disable_queue_interrupts(ccp);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	sp_free_ccp_irq(ccp->sp, ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queue */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}

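/* Version-3 dispatch tables: ccp3_actions routes the generic ccp-ops
 * requests to the v3 engine helpers above.  The two vdata instances differ
 * only in the offset of the CCP register block within the device's MMIO
 * space (0x20000 for ccpv3, 0 for the platform variant).
 */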
static const struct ccp_actions ccp3_actions = {
	.aes = ccp_perform_aes,
	.xts_aes = ccp_perform_xts_aes,
	.des3 = NULL,
	.sha = ccp_perform_sha,
	.rsa = ccp_perform_rsa,
	.passthru = ccp_perform_passthru,
	.ecc = ccp_perform_ecc,
	.sballoc = ccp_alloc_ksb,
	.sbfree = ccp_free_ksb,
	.init = ccp_init,
	.destroy = ccp_destroy,
	.get_free_slots = ccp_get_free_slots,
	.irqhandler = ccp_irq_handler,
};

const struct ccp_vdata ccpv3_platform = {
	.version = CCP_VERSION(3, 0),
	.setup = NULL,
	.perform = &ccp3_actions,
	.offset = 0,
	.rsamax = CCP_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv3 = {
	.version = CCP_VERSION(3, 0),
	.setup = NULL,
	.perform = &ccp3_actions,
	.offset = 0x20000,
	.rsamax = CCP_RSA_MAX_WIDTH,
};