// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

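/*
 * Scrub a finished request: req_data may hold key material, so it is
 * freed with kfree_sensitive(), which zeroes the buffer before freeing.
 */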
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
        if (vc_req) {
                kfree_sensitive(vc_req->req_data);
                kfree(vc_req->sgs);
        }
}

static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
        complete(&vc_ctrl_req->compl);
}

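/*
 * Interrupt callback for the control virtqueue. ctrl_lock is dropped
 * around each per-request completion, and the disable_cb/enable_cb loop
 * re-polls the queue to catch buffers that arrived while callbacks were
 * disabled.
 */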
static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
        struct virtio_crypto *vcrypto = vq->vdev->priv;
        struct virtio_crypto_ctrl_request *vc_ctrl_req;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
                        spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
                        virtio_crypto_ctrlq_callback(vc_ctrl_req);
                        spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}

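/*
 * Send a request on the control virtqueue and block until the device
 * completes it. virtqueue_add_sgs() must use GFP_ATOMIC because
 * ctrl_lock is held with interrupts disabled; wait_for_completion()
 * sleeps, so callers must be in process context.
 */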
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
                                  unsigned int out_sgs, unsigned int in_sgs,
                                  struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
        int err;
        unsigned long flags;

        init_completion(&vc_ctrl_req->compl);

        spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
        err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
        if (err < 0) {
                spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
                return err;
        }

        virtqueue_kick(vcrypto->ctrl_vq);
        spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);

        wait_for_completion(&vc_ctrl_req->compl);

        return 0;
}

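/*
 * Tasklet bottom half for the data virtqueues: drains completed requests
 * and invokes their algorithm callbacks outside hard-interrupt context.
 */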
static void virtcrypto_done_task(unsigned long data)
{
        struct data_queue *data_vq = (struct data_queue *)data;
        struct virtqueue *vq = data_vq->vq;
        struct virtio_crypto_request *vc_req;
        unsigned int len;

        do {
                virtqueue_disable_cb(vq);
                while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
                        if (vc_req->alg_cb)
                                vc_req->alg_cb(vc_req, len);
                }
        } while (!virtqueue_enable_cb(vq));
}

static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
        struct virtio_crypto *vcrypto = vq->vdev->priv;
        struct data_queue *dq = &vcrypto->data_vq[vq->index];

        tasklet_schedule(&dq->done_task);
}

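/*
 * The vqs/callbacks/names arrays below are only parameters for
 * virtio_find_vqs() and are freed again on every exit path; the
 * virtqueues themselves live on until virtcrypto_del_vqs().
 */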
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
        vq_callback_t **callbacks;
        struct virtqueue **vqs;
        int ret = -ENOMEM;
        int i, total_vqs;
        const char **names;
        struct device *dev = &vi->vdev->dev;

        /*
         * We expect 1 data virtqueue, followed by possibly N-1 more data
         * queues used in multiqueue mode, followed by the control vq.
         */
        total_vqs = vi->max_data_queues + 1;

        /* Allocate space for find_vqs parameters */
        vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!vqs)
                goto err_vq;
        callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
        if (!callbacks)
                goto err_callback;
        names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
        if (!names)
                goto err_names;

        /* Parameters for control virtqueue */
        callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
        names[total_vqs - 1] = "controlq";

        /* Allocate/initialize parameters for data virtqueues */
        for (i = 0; i < vi->max_data_queues; i++) {
                callbacks[i] = virtcrypto_dataq_callback;
                snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
                         "dataq.%d", i);
                names[i] = vi->data_vq[i].name;
        }

        ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
        if (ret)
                goto err_find;

        vi->ctrl_vq = vqs[total_vqs - 1];

        for (i = 0; i < vi->max_data_queues; i++) {
                spin_lock_init(&vi->data_vq[i].lock);
                vi->data_vq[i].vq = vqs[i];
                /* Initialize crypto engine */
                vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
                                                virtqueue_get_vring_size(vqs[i]));
                if (!vi->data_vq[i].engine) {
                        ret = -ENOMEM;
                        goto err_engine;
                }
                tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
                             (unsigned long)&vi->data_vq[i]);
        }

        kfree(names);
        kfree(callbacks);
        kfree(vqs);

        return 0;

err_engine:
err_find:
        kfree(names);
err_names:
        kfree(callbacks);
err_callback:
        kfree(vqs);
err_vq:
        return ret;
}

static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
        vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
                              GFP_KERNEL);
        if (!vi->data_vq)
                return -ENOMEM;

        return 0;
}

static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
        int i;

        if (vi->affinity_hint_set) {
                for (i = 0; i < vi->max_data_queues; i++)
                        virtqueue_set_affinity(vi->data_vq[i].vq, NULL);

                vi->affinity_hint_set = false;
        }
}

static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
        int i = 0;
        int cpu;

        /*
         * In single queue mode, we don't set the cpu affinity.
         */
        if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
                virtcrypto_clean_affinity(vcrypto, -1);
                return;
        }

        /*
         * In multiqueue mode, we make each queue private to one cpu by
         * setting the affinity hint, which eliminates contention.
         *
         * TODO: add cpu hotplug support by registering a cpu notifier.
         */
        for_each_online_cpu(cpu) {
                virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
                if (++i >= vcrypto->max_data_queues)
                        break;
        }

        vcrypto->affinity_hint_set = true;
}

static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
        kfree(vi->data_vq);
}

static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
        int ret;

        /* Allocate send & receive queues */
        ret = virtcrypto_alloc_queues(vi);
        if (ret)
                goto err;

        ret = virtcrypto_find_vqs(vi);
        if (ret)
                goto err_free;

        cpus_read_lock();
        virtcrypto_set_affinity(vi);
        cpus_read_unlock();

        return 0;

err_free:
        virtcrypto_free_queues(vi);
err:
        return ret;
}

static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
        u32 status;
        int err;

        virtio_cread_le(vcrypto->vdev,
                        struct virtio_crypto_config, status, &status);

        /*
         * Unknown status bits would be a host error and the driver
         * should consider the device to be broken.
         */
        if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
                dev_warn(&vcrypto->vdev->dev,
                         "Unknown status bits: 0x%x\n", status);

                virtio_break_device(vcrypto->vdev);
                return -EPERM;
        }

        if (vcrypto->status == status)
                return 0;

        vcrypto->status = status;

        if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
                err = virtcrypto_dev_start(vcrypto);
                if (err) {
                        dev_err(&vcrypto->vdev->dev,
                                "Failed to start virtio crypto device.\n");

                        return -EPERM;
                }
                dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
        } else {
                virtcrypto_dev_stop(vcrypto);
                dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
        }

        return 0;
}

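/*
 * Start one crypto engine per data virtqueue; on failure, stop the
 * engines that were already started before returning the error.
 */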
static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
        int32_t i;
        int ret;

        for (i = 0; i < vcrypto->max_data_queues; i++) {
                if (vcrypto->data_vq[i].engine) {
                        ret = crypto_engine_start(vcrypto->data_vq[i].engine);
                        if (ret)
                                goto err;
                }
        }

        return 0;

err:
        while (--i >= 0)
                if (vcrypto->data_vq[i].engine)
                        crypto_engine_exit(vcrypto->data_vq[i].engine);

        return ret;
}

static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
        u32 i;

        for (i = 0; i < vcrypto->max_data_queues; i++)
                if (vcrypto->data_vq[i].engine)
                        crypto_engine_exit(vcrypto->data_vq[i].engine);
}

static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
        struct virtio_device *vdev = vcrypto->vdev;

        virtcrypto_clean_affinity(vcrypto, -1);

        vdev->config->del_vqs(vdev);

        virtcrypto_free_queues(vcrypto);
}

static void vcrypto_config_changed_work(struct work_struct *work)
{
        struct virtio_crypto *vcrypto =
                container_of(work, struct virtio_crypto, config_work);

        virtcrypto_update_status(vcrypto);
}

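/*
 * Device probe: read the configuration space, register the device,
 * set up virtqueues and crypto engines, then mark the device ready
 * before reading its status. The error labels unwind these steps in
 * reverse order.
 */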
static int virtcrypto_probe(struct virtio_device *vdev)
{
        int err = -EFAULT;
        struct virtio_crypto *vcrypto;
        u32 max_data_queues = 0, max_cipher_key_len = 0;
        u32 max_auth_key_len = 0;
        u64 max_size = 0;
        u32 cipher_algo_l = 0;
        u32 cipher_algo_h = 0;
        u32 hash_algo = 0;
        u32 mac_algo_l = 0;
        u32 mac_algo_h = 0;
        u32 aead_algo = 0;
        u32 akcipher_algo = 0;
        u32 crypto_services = 0;

        if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
                return -ENODEV;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
                /*
                 * If the accelerator is connected to a node with no memory
                 * there is no point in using the accelerator since the remote
                 * memory transaction will be very slow.
                 */
                dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
                return -EINVAL;
        }

        vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
                               dev_to_node(&vdev->dev));
        if (!vcrypto)
                return -ENOMEM;

        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_dataqueues, &max_data_queues);
        if (max_data_queues < 1)
                max_data_queues = 1;

        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_cipher_key_len, &max_cipher_key_len);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_auth_key_len, &max_auth_key_len);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        max_size, &max_size);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        crypto_services, &crypto_services);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        cipher_algo_l, &cipher_algo_l);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        cipher_algo_h, &cipher_algo_h);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        hash_algo, &hash_algo);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        mac_algo_l, &mac_algo_l);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        mac_algo_h, &mac_algo_h);
        virtio_cread_le(vdev, struct virtio_crypto_config,
                        aead_algo, &aead_algo);
        if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
                virtio_cread_le(vdev, struct virtio_crypto_config,
                                akcipher_algo, &akcipher_algo);

        /* Add virtio crypto device to global table */
        err = virtcrypto_devmgr_add_dev(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
                goto free;
        }
        vcrypto->owner = THIS_MODULE;
        vdev->priv = vcrypto;
        vcrypto->vdev = vdev;

        spin_lock_init(&vcrypto->ctrl_lock);

        /* Use single data queue as default */
        vcrypto->curr_queue = 1;
        vcrypto->max_data_queues = max_data_queues;
        vcrypto->max_cipher_key_len = max_cipher_key_len;
        vcrypto->max_auth_key_len = max_auth_key_len;
        vcrypto->max_size = max_size;
        vcrypto->crypto_services = crypto_services;
        vcrypto->cipher_algo_l = cipher_algo_l;
        vcrypto->cipher_algo_h = cipher_algo_h;
        vcrypto->mac_algo_l = mac_algo_l;
        vcrypto->mac_algo_h = mac_algo_h;
        vcrypto->hash_algo = hash_algo;
        vcrypto->aead_algo = aead_algo;
        vcrypto->akcipher_algo = akcipher_algo;

        dev_info(&vdev->dev,
                 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
                 vcrypto->max_data_queues,
                 vcrypto->max_cipher_key_len,
                 vcrypto->max_auth_key_len,
                 vcrypto->max_size);

        err = virtcrypto_init_vqs(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to initialize vqs.\n");
                goto free_dev;
        }

        err = virtcrypto_start_crypto_engines(vcrypto);
        if (err)
                goto free_vqs;

        virtio_device_ready(vdev);

        err = virtcrypto_update_status(vcrypto);
        if (err)
                goto free_engines;

        INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);

        return 0;

free_engines:
        virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
        virtio_reset_device(vdev);
        virtcrypto_del_vqs(vcrypto);
free_dev:
        virtcrypto_devmgr_rm_dev(vcrypto);
free:
        kfree(vcrypto);
        return err;
}

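/*
 * Called after the device has been reset: detach and free any requests
 * that were still pending on the data virtqueues.
 */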
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
        struct virtio_crypto_request *vc_req;
        int i;
        struct virtqueue *vq;

        for (i = 0; i < vcrypto->max_data_queues; i++) {
                vq = vcrypto->data_vq[i].vq;
                while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
                        kfree(vc_req->req_data);
                        kfree(vc_req->sgs);
                }
                cond_resched();
        }
}

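/*
 * Teardown mirrors probe: stop the device and tasklets first, reset the
 * device, then release requests, engines, and virtqueues before freeing
 * the device structure.
 */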
static void virtcrypto_remove(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;
        int i;

        dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

        flush_work(&vcrypto->config_work);
        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);
        for (i = 0; i < vcrypto->max_data_queues; i++)
                tasklet_kill(&vcrypto->data_vq[i].done_task);
        virtio_reset_device(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        virtcrypto_clear_crypto_engines(vcrypto);
        virtcrypto_del_vqs(vcrypto);
        virtcrypto_devmgr_rm_dev(vcrypto);
        kfree(vcrypto);
}

static void virtcrypto_config_changed(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        schedule_work(&vcrypto->config_work);
}

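/*
 * Suspend/resume support: freeze quiesces and tears down the virtqueues;
 * restore rebuilds them and restarts the device, mirroring probe.
 */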
#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;

        flush_work(&vcrypto->config_work);
        virtio_reset_device(vdev);
        virtcrypto_free_unused_reqs(vcrypto);
        if (virtcrypto_dev_started(vcrypto))
                virtcrypto_dev_stop(vcrypto);

        virtcrypto_clear_crypto_engines(vcrypto);
        virtcrypto_del_vqs(vcrypto);
        return 0;
}

static int virtcrypto_restore(struct virtio_device *vdev)
{
        struct virtio_crypto *vcrypto = vdev->priv;
        int err;

        err = virtcrypto_init_vqs(vcrypto);
        if (err)
                return err;

        err = virtcrypto_start_crypto_engines(vcrypto);
        if (err)
                goto free_vqs;

        virtio_device_ready(vdev);

        err = virtcrypto_dev_start(vcrypto);
        if (err) {
                dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
                goto free_engines;
        }

        return 0;

free_engines:
        virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
        virtio_reset_device(vdev);
        virtcrypto_del_vqs(vcrypto);
        return err;
}
#endif

static const unsigned int features[] = {
        /* none */
};

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static struct virtio_driver virtio_crypto_driver = {
        .driver.name         = KBUILD_MODNAME,
        .driver.owner        = THIS_MODULE,
        .feature_table       = features,
        .feature_table_size  = ARRAY_SIZE(features),
        .id_table            = id_table,
        .probe               = virtcrypto_probe,
        .remove              = virtcrypto_remove,
        .config_changed      = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze              = virtcrypto_freeze,
        .restore             = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");