// SPDX-License-Identifier: GPL-2.0-or-later
 /* Driver for Virtio crypto device.
  *
  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
  */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
 15
 16void
 17virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
 18{
 19	if (vc_req) {
 20		kfree_sensitive(vc_req->req_data);
 21		kfree(vc_req->sgs);
 22	}
 23}
 24
/* Per-request completion hook for the control queue: wake the waiter. */
static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	complete(&vc_ctrl_req->compl);
}
 29
/*
 * Virtqueue interrupt callback for the control queue.
 *
 * Drains completed control requests and signals each waiter.  ctrl_lock
 * serializes access to the control virtqueue; it is dropped around the
 * per-request callback so complete() runs unlocked.  The
 * disable_cb/enable_cb loop closes the race where a buffer becomes used
 * between draining the queue and re-enabling callbacks; a broken
 * virtqueue terminates the loop early.
 */
static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
			/* Drop the lock while completing the waiter. */
			spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
			virtio_crypto_ctrlq_callback(vc_ctrl_req);
			spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}
 50
 51int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
 52		unsigned int out_sgs, unsigned int in_sgs,
 53		struct virtio_crypto_ctrl_request *vc_ctrl_req)
 54{
 55	int err;
 56	unsigned long flags;
 57
 58	init_completion(&vc_ctrl_req->compl);
 59
 60	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
 61	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
 62	if (err < 0) {
 63		spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
 64		return err;
 65	}
 66
 67	virtqueue_kick(vcrypto->ctrl_vq);
 68	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
 69
 70	wait_for_completion(&vc_ctrl_req->compl);
 71
 72	return 0;
 73}
 74
 75static void virtcrypto_done_task(unsigned long data)
 76{
 77	struct data_queue *data_vq = (struct data_queue *)data;
 78	struct virtqueue *vq = data_vq->vq;
 79	struct virtio_crypto_request *vc_req;
 80	unsigned int len;
 81
 82	do {
 83		virtqueue_disable_cb(vq);
 84		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
 85			if (vc_req->alg_cb)
 86				vc_req->alg_cb(vc_req, len);
 87		}
 88	} while (!virtqueue_enable_cb(vq));
 89}
 90
 91static void virtcrypto_dataq_callback(struct virtqueue *vq)
 92{
 93	struct virtio_crypto *vcrypto = vq->vdev->priv;
 94	struct data_queue *dq = &vcrypto->data_vq[vq->index];
 95
 96	tasklet_schedule(&dq->done_task);
 97}
 98
 99static int virtcrypto_find_vqs(struct virtio_crypto *vi)
100{
101	vq_callback_t **callbacks;
102	struct virtqueue **vqs;
103	int ret = -ENOMEM;
104	int i, total_vqs;
105	const char **names;
106	struct device *dev = &vi->vdev->dev;
107
108	/*
109	 * We expect 1 data virtqueue, followed by
110	 * possible N-1 data queues used in multiqueue mode,
111	 * followed by control vq.
112	 */
113	total_vqs = vi->max_data_queues + 1;
114
115	/* Allocate space for find_vqs parameters */
116	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
117	if (!vqs)
118		goto err_vq;
119	callbacks = kcalloc(total_vqs, sizeof(*callbacks), GFP_KERNEL);
120	if (!callbacks)
121		goto err_callback;
122	names = kcalloc(total_vqs, sizeof(*names), GFP_KERNEL);
123	if (!names)
124		goto err_names;
125
126	/* Parameters for control virtqueue */
127	callbacks[total_vqs - 1] = virtcrypto_ctrlq_callback;
128	names[total_vqs - 1] = "controlq";
129
130	/* Allocate/initialize parameters for data virtqueues */
131	for (i = 0; i < vi->max_data_queues; i++) {
132		callbacks[i] = virtcrypto_dataq_callback;
133		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
134				"dataq.%d", i);
135		names[i] = vi->data_vq[i].name;
136	}
137
138	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, callbacks, names, NULL);
139	if (ret)
140		goto err_find;
141
142	vi->ctrl_vq = vqs[total_vqs - 1];
143
144	for (i = 0; i < vi->max_data_queues; i++) {
145		spin_lock_init(&vi->data_vq[i].lock);
146		vi->data_vq[i].vq = vqs[i];
147		/* Initialize crypto engine */
148		vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
149						virtqueue_get_vring_size(vqs[i]));
150		if (!vi->data_vq[i].engine) {
151			ret = -ENOMEM;
152			goto err_engine;
153		}
154		tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
155				(unsigned long)&vi->data_vq[i]);
156	}
157
158	kfree(names);
159	kfree(callbacks);
160	kfree(vqs);
161
162	return 0;
163
164err_engine:
165err_find:
166	kfree(names);
167err_names:
168	kfree(callbacks);
169err_callback:
170	kfree(vqs);
171err_vq:
172	return ret;
173}
174
175static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
176{
177	vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
178				GFP_KERNEL);
179	if (!vi->data_vq)
180		return -ENOMEM;
181
182	return 0;
183}
184
185static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
186{
187	int i;
188
189	if (vi->affinity_hint_set) {
190		for (i = 0; i < vi->max_data_queues; i++)
191			virtqueue_set_affinity(vi->data_vq[i].vq, NULL);
192
193		vi->affinity_hint_set = false;
194	}
195}
196
197static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
198{
199	int i = 0;
200	int cpu;
201
202	/*
203	 * In single queue mode, we don't set the cpu affinity.
204	 */
205	if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
206		virtcrypto_clean_affinity(vcrypto, -1);
207		return;
208	}
209
210	/*
211	 * In multiqueue mode, we let the queue to be private to one cpu
212	 * by setting the affinity hint to eliminate the contention.
213	 *
214	 * TODO: adds cpu hotplug support by register cpu notifier.
215	 *
216	 */
217	for_each_online_cpu(cpu) {
218		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
219		if (++i >= vcrypto->max_data_queues)
220			break;
221	}
222
223	vcrypto->affinity_hint_set = true;
224}
225
/* Free the per-queue array allocated by virtcrypto_alloc_queues(). */
static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
	kfree(vi->data_vq);
}
230
/*
 * Full virtqueue bring-up: allocate the per-queue array, discover and
 * wire up the virtqueues, then publish CPU affinity hints under the CPU
 * hotplug read lock.  Returns 0 or a negative errno; on find_vqs failure
 * the queue array is released again.
 */
static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtcrypto_alloc_queues(vi);
	if (ret)
		return ret;

	ret = virtcrypto_find_vqs(vi);
	if (ret) {
		virtcrypto_free_queues(vi);
		return ret;
	}

	cpus_read_lock();
	virtcrypto_set_affinity(vi);
	cpus_read_unlock();

	return 0;
}
255
/*
 * Re-read the device status field from config space and react to
 * transitions.
 *
 * Any status bit other than VIRTIO_CRYPTO_S_HW_READY is a host error:
 * the device is marked broken and -EPERM returned.  When HW_READY is
 * newly set the device is started (algorithms registered); when it is
 * newly cleared the device is stopped.  An unchanged status is a no-op.
 * Returns 0 on success, -EPERM on unknown bits or a failed start.
 */
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
	u32 status;
	int err;

	virtio_cread_le(vcrypto->vdev,
			struct virtio_crypto_config, status, &status);

	/*
	 * Unknown status bits would be a host error and the driver
	 * should consider the device to be broken.
	 */
	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
		dev_warn(&vcrypto->vdev->dev,
				"Unknown status bits: 0x%x\n", status);

		virtio_break_device(vcrypto->vdev);
		return -EPERM;
	}

	/* No transition - nothing to do. */
	if (vcrypto->status == status)
		return 0;

	vcrypto->status = status;

	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
		err = virtcrypto_dev_start(vcrypto);
		if (err) {
			dev_err(&vcrypto->vdev->dev,
				"Failed to start virtio crypto device.\n");

			return -EPERM;
		}
		dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
	} else {
		virtcrypto_dev_stop(vcrypto);
		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
	}

	return 0;
}
297
298static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
299{
300	int32_t i;
301	int ret;
302
303	for (i = 0; i < vcrypto->max_data_queues; i++) {
304		if (vcrypto->data_vq[i].engine) {
305			ret = crypto_engine_start(vcrypto->data_vq[i].engine);
306			if (ret)
307				goto err;
308		}
309	}
310
311	return 0;
312
313err:
314	while (--i >= 0)
315		if (vcrypto->data_vq[i].engine)
316			crypto_engine_exit(vcrypto->data_vq[i].engine);
317
318	return ret;
319}
320
321static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
322{
323	u32 i;
324
325	for (i = 0; i < vcrypto->max_data_queues; i++)
326		if (vcrypto->data_vq[i].engine)
327			crypto_engine_exit(vcrypto->data_vq[i].engine);
328}
329
/*
 * Delete all virtqueues and release the per-queue array.  Callers in
 * this file reset the device first, so no callbacks are in flight when
 * the queues go away.
 */
static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
	struct virtio_device *vdev = vcrypto->vdev;

	/* Drop affinity hints before the queues disappear. */
	virtcrypto_clean_affinity(vcrypto, -1);

	vdev->config->del_vqs(vdev);

	virtcrypto_free_queues(vcrypto);
}
340
/* Process-context worker for config-space changes: re-evaluate status. */
static void vcrypto_config_changed_work(struct work_struct *work)
{
	struct virtio_crypto *vcrypto =
		container_of(work, struct virtio_crypto, config_work);

	virtcrypto_update_status(vcrypto);
}
348
349static int virtcrypto_probe(struct virtio_device *vdev)
350{
351	int err = -EFAULT;
352	struct virtio_crypto *vcrypto;
353	u32 max_data_queues = 0, max_cipher_key_len = 0;
354	u32 max_auth_key_len = 0;
355	u64 max_size = 0;
356	u32 cipher_algo_l = 0;
357	u32 cipher_algo_h = 0;
358	u32 hash_algo = 0;
359	u32 mac_algo_l = 0;
360	u32 mac_algo_h = 0;
361	u32 aead_algo = 0;
362	u32 akcipher_algo = 0;
363	u32 crypto_services = 0;
364
365	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
366		return -ENODEV;
367
368	if (!vdev->config->get) {
369		dev_err(&vdev->dev, "%s failure: config access disabled\n",
370			__func__);
371		return -EINVAL;
372	}
373
374	if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
375		/*
376		 * If the accelerator is connected to a node with no memory
377		 * there is no point in using the accelerator since the remote
378		 * memory transaction will be very slow.
379		 */
380		dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
381		return -EINVAL;
382	}
383
384	vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
385					dev_to_node(&vdev->dev));
386	if (!vcrypto)
387		return -ENOMEM;
388
389	virtio_cread_le(vdev, struct virtio_crypto_config,
390			max_dataqueues, &max_data_queues);
391	if (max_data_queues < 1)
392		max_data_queues = 1;
393
394	virtio_cread_le(vdev, struct virtio_crypto_config,
395			max_cipher_key_len, &max_cipher_key_len);
396	virtio_cread_le(vdev, struct virtio_crypto_config,
397			max_auth_key_len, &max_auth_key_len);
398	virtio_cread_le(vdev, struct virtio_crypto_config,
399			max_size, &max_size);
400	virtio_cread_le(vdev, struct virtio_crypto_config,
401			crypto_services, &crypto_services);
402	virtio_cread_le(vdev, struct virtio_crypto_config,
403			cipher_algo_l, &cipher_algo_l);
404	virtio_cread_le(vdev, struct virtio_crypto_config,
405			cipher_algo_h, &cipher_algo_h);
406	virtio_cread_le(vdev, struct virtio_crypto_config,
407			hash_algo, &hash_algo);
408	virtio_cread_le(vdev, struct virtio_crypto_config,
409			mac_algo_l, &mac_algo_l);
410	virtio_cread_le(vdev, struct virtio_crypto_config,
411			mac_algo_h, &mac_algo_h);
412	virtio_cread_le(vdev, struct virtio_crypto_config,
413			aead_algo, &aead_algo);
414	if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
415		virtio_cread_le(vdev, struct virtio_crypto_config,
416				akcipher_algo, &akcipher_algo);
417
418	/* Add virtio crypto device to global table */
419	err = virtcrypto_devmgr_add_dev(vcrypto);
420	if (err) {
421		dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
422		goto free;
423	}
424	vcrypto->owner = THIS_MODULE;
425	vcrypto = vdev->priv = vcrypto;
426	vcrypto->vdev = vdev;
427
428	spin_lock_init(&vcrypto->ctrl_lock);
429
430	/* Use single data queue as default */
431	vcrypto->curr_queue = 1;
432	vcrypto->max_data_queues = max_data_queues;
433	vcrypto->max_cipher_key_len = max_cipher_key_len;
434	vcrypto->max_auth_key_len = max_auth_key_len;
435	vcrypto->max_size = max_size;
436	vcrypto->crypto_services = crypto_services;
437	vcrypto->cipher_algo_l = cipher_algo_l;
438	vcrypto->cipher_algo_h = cipher_algo_h;
439	vcrypto->mac_algo_l = mac_algo_l;
440	vcrypto->mac_algo_h = mac_algo_h;
441	vcrypto->hash_algo = hash_algo;
442	vcrypto->aead_algo = aead_algo;
443	vcrypto->akcipher_algo = akcipher_algo;
444
445	dev_info(&vdev->dev,
446		"max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
447		vcrypto->max_data_queues,
448		vcrypto->max_cipher_key_len,
449		vcrypto->max_auth_key_len,
450		vcrypto->max_size);
451
452	err = virtcrypto_init_vqs(vcrypto);
453	if (err) {
454		dev_err(&vdev->dev, "Failed to initialize vqs.\n");
455		goto free_dev;
456	}
457
458	err = virtcrypto_start_crypto_engines(vcrypto);
459	if (err)
460		goto free_vqs;
461
462	virtio_device_ready(vdev);
463
464	err = virtcrypto_update_status(vcrypto);
465	if (err)
466		goto free_engines;
467
468	INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);
469
470	return 0;
471
472free_engines:
473	virtcrypto_clear_crypto_engines(vcrypto);
474free_vqs:
475	virtio_reset_device(vdev);
476	virtcrypto_del_vqs(vcrypto);
477free_dev:
478	virtcrypto_devmgr_rm_dev(vcrypto);
479free:
480	kfree(vcrypto);
481	return err;
482}
483
484static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
485{
486	struct virtio_crypto_request *vc_req;
487	int i;
488	struct virtqueue *vq;
489
490	for (i = 0; i < vcrypto->max_data_queues; i++) {
491		vq = vcrypto->data_vq[i].vq;
492		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL) {
493			kfree(vc_req->req_data);
494			kfree(vc_req->sgs);
495		}
496		cond_resched();
497	}
498}
499
/*
 * Device removal: tear down in roughly the reverse order of probe.
 * The sequence matters: flush pending config work, stop the algorithm
 * layer if running, kill the per-queue tasklets, reset the device so no
 * new interrupts or completions arrive, then reclaim unused requests
 * and free engines, queues, the devmgr entry, and the state itself.
 */
static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int i;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	flush_work(&vcrypto->config_work);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	for (i = 0; i < vcrypto->max_data_queues; i++)
		tasklet_kill(&vcrypto->data_vq[i].done_task);
	virtio_reset_device(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}
519
/* Config-space change interrupt: defer handling to process context. */
static void virtcrypto_config_changed(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	schedule_work(&vcrypto->config_work);
}
526
527#ifdef CONFIG_PM_SLEEP
/*
 * PM freeze hook: quiesce the device ahead of suspend/hibernation.
 * The device is reset first so no completions race with the teardown;
 * unused requests are then reclaimed, the algorithm layer stopped if
 * running, and the engines and virtqueues released.  virtcrypto_restore()
 * rebuilds all of this.
 */
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	flush_work(&vcrypto->config_work);
	virtio_reset_device(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	return 0;
}
542
/*
 * PM restore hook: rebuild the virtqueues and crypto engines that
 * virtcrypto_freeze() tore down, mark the device ready, and restart the
 * algorithm layer.  Error unwinding mirrors the tail of
 * virtcrypto_probe().  Returns 0 or a negative errno.
 */
static int virtcrypto_restore(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int err;

	err = virtcrypto_init_vqs(vcrypto);
	if (err)
		return err;

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_dev_start(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
		goto free_engines;
	}

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	virtio_reset_device(vdev);
	virtcrypto_del_vqs(vcrypto);
	return err;
}
573#endif
574
575static const unsigned int features[] = {
576	/* none */
577};
578
579static const struct virtio_device_id id_table[] = {
580	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
581	{ 0 },
582};
583
584static struct virtio_driver virtio_crypto_driver = {
585	.driver.name         = KBUILD_MODNAME,
586	.driver.owner        = THIS_MODULE,
587	.feature_table       = features,
588	.feature_table_size  = ARRAY_SIZE(features),
589	.id_table            = id_table,
590	.probe               = virtcrypto_probe,
591	.remove              = virtcrypto_remove,
592	.config_changed = virtcrypto_config_changed,
593#ifdef CONFIG_PM_SLEEP
594	.freeze = virtcrypto_freeze,
595	.restore = virtcrypto_restore,
596#endif
597};
598
599module_virtio_driver(virtio_crypto_driver);
600
601MODULE_DEVICE_TABLE(virtio, id_table);
602MODULE_DESCRIPTION("virtio crypto device driver");
603MODULE_LICENSE("GPL");
604MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");