/* Common header for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H

#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>

/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* To protect the vq operations for the dataq */
	spinlock_t lock;

	/* Name of the tx queue: dataq.$index */
	char name[32];
};

struct virtio_crypto {
	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct data_queue *data_vq;

	/* To protect the vq operations for the controlq */
	spinlock_t ctrl_lock;

	/* Maximum number of data queues supported by the device */
	u32 max_data_queues;

	/* Number of queues currently used by the driver */
	u32 curr_queue;

	/* Maximum length of cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of authentication key */
	u32 max_auth_key_len;
	/* Maximum size of a single request */
	u64 max_size;

	/* Control VQ buffers: protected by the ctrl_lock */
	struct virtio_crypto_op_ctrl_req ctrl;
	struct virtio_crypto_session_input input;
	struct virtio_crypto_inhdr ctrl_status;

	unsigned long status;
	atomic_t ref_count;
	struct list_head list;
	struct module *owner;
	uint8_t dev_id;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};

struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
	__u64 session_id;
};

struct virtio_crypto_ablkcipher_ctx {
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_request {
	/* Cipher or AEAD operation */
	uint32_t type;
	uint8_t status;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;
	struct virtio_crypto_op_data_req *req_data;
	struct scatterlist **sgs;
	uint8_t *iv;
};

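/*
 * Device manager and lifecycle helpers: they keep a global list of probed
 * virtio crypto devices, reference-count them and track whether a device
 * has been started.
 */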
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_get_dev_node(int node);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);

/* Return the physical package (node) id of the CPU the caller runs on */
static inline int virtio_crypto_get_current_node(void)
{
	int cpu, node;

	cpu = get_cpu();
	node = topology_physical_package_id(cpu);
	put_cpu();

	return node;
}

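/* Register/unregister the driver's algorithm implementations with the crypto API */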
int virtio_crypto_algs_register(void);
void virtio_crypto_algs_unregister(void);

#endif /* _VIRTIO_CRYPTO_COMMON_H */

/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Common header for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H

#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>

/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* To protect the vq operations for the dataq */
	spinlock_t lock;

	/* Name of the tx queue: dataq.$index */
	char name[32];

	/* Crypto engine that queues requests for this data virtqueue */
	struct crypto_engine *engine;
};

struct virtio_crypto {
	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct data_queue *data_vq;

	/* To protect the vq operations for the controlq */
	spinlock_t ctrl_lock;

	/* Maximum number of data queues supported by the device */
	u32 max_data_queues;

	/* Number of queues currently used by the driver */
	u32 curr_queue;

	/*
	 * Specifies the services mask which the device supports,
	 * see VIRTIO_CRYPTO_SERVICE_*
	 */
	u32 crypto_services;

	/* Detailed algorithms mask */
	u32 cipher_algo_l;
	u32 cipher_algo_h;
	u32 hash_algo;
	u32 mac_algo_l;
	u32 mac_algo_h;
	u32 aead_algo;

	/* Maximum length of cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of authentication key */
	u32 max_auth_key_len;
	/* Maximum size of a single request */
	u64 max_size;

	/* Control VQ buffers: protected by the ctrl_lock */
	struct virtio_crypto_op_ctrl_req ctrl;
	struct virtio_crypto_session_input input;
	struct virtio_crypto_inhdr ctrl_status;

	unsigned long status;
	atomic_t ref_count;
	struct list_head list;
	struct module *owner;
	uint8_t dev_id;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};

struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
	__u64 session_id;
};

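/*
 * Completion callback invoked once the device has finished a data request;
 * len is the used length reported by the device for that request.
 */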
struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
		(struct virtio_crypto_request *vc_req, int len);

struct virtio_crypto_request {
	/* Status written back by the device (VIRTIO_CRYPTO_OK, ...) */
	uint8_t status;
	struct virtio_crypto_op_data_req *req_data;
	struct scatterlist **sgs;
	/* Data virtqueue this request was submitted on */
	struct data_queue *dataq;
	/* Per-algorithm completion callback */
	virtio_crypto_data_callback alg_cb;
};

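/*
 * Device manager and lifecycle helpers: they keep a global list of probed
 * virtio crypto devices, reference-count them, track their started state
 * and report which services/algorithms a device advertises.
 */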
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
				  uint32_t service,
				  uint32_t algo);
struct virtio_crypto *virtcrypto_get_dev_node(int node,
					      uint32_t service,
					      uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
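
/* crypto engine callback that processes one queued skcipher request */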
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq);

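/* Free the per-request resources once the device has completed the request */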
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);

/* Return the physical package (node) id of the CPU the caller runs on */
static inline int virtio_crypto_get_current_node(void)
{
	int cpu, node;

	cpu = get_cpu();
	node = topology_physical_package_id(cpu);
	put_cpu();

	return node;
}

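/* Register/unregister the algorithms this driver exposes for the given device */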
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);

#endif /* _VIRTIO_CRYPTO_COMMON_H */