// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}

	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point the request has been dequeued successfully */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If the retry mechanism is supported,
		 * unprepare the current request and
		 * enqueue it back into the crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If the hardware was unable to execute the request, enqueue
		 * it back in front of the crypto-engine queue, to keep the
		 * order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If the retry mechanism is supported, send new requests to the engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}
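
/*
 * Usage sketch (not part of this file; the my_* names are illustrative):
 * the pump above drives per-transform ops that a driver fills in at tfm
 * init time.  Because crypto_tfm_ctx() is cast directly to a
 * struct crypto_engine_ctx here, the engine context must be the first
 * member of the driver's tfm context:
 *
 *	struct my_tfm_ctx {
 *		struct crypto_engine_ctx enginectx;	// must come first
 *		// driver-private state follows
 *	};
 *
 *	static int my_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.prepare_request = my_prepare_req;	  // optional
 *		ctx->enginectx.op.unprepare_request = my_unprepare_req; // optional
 *		ctx->enginectx.op.do_one_request = my_do_one_req;	  // required
 *		return 0;
 *	}
 */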

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request to the
 * engine queue and pump it
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
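
/*
 * Usage sketch (hypothetical driver; my_dev and my_dev_from_req are
 * assumptions): an skcipher .encrypt callback usually just hands the
 * request to the engine and returns the transfer status (typically
 * -EINPROGRESS or -EBUSY); the actual work happens later, when the pump
 * invokes the driver's do_one_request op.
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct my_dev *dd = my_dev_from_req(req);	// lookup elided
 *
 *		return crypto_transfer_skcipher_request_to_engine(dd->engine,
 *								  req);
 *	}
 */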

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
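
/*
 * Usage sketch (hypothetical driver): when the hardware signals that the
 * current request is done (e.g. in an interrupt handler or completion
 * tasklet), the driver reports the result back so the engine can complete
 * the request and pump the next one.  dd->engine and dd->req are
 * illustrative driver state, not part of this API:
 *
 *	crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 */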

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is in a busy state,
	 * we need to wait for a while to pump the requests of the engine
	 * queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 * a batch of requests.
 * This has the form:
 * callback(struct crypto_engine *engine)
 * where:
 * @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware has support for the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
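
/*
 * Usage sketch (hypothetical probe routine; my_do_batch and MY_HW_QLEN are
 * assumptions): a driver whose hardware can queue several requests and
 * reports -ENOSPC from do_one_request when full would allocate the engine
 * with retry support and then start it:
 *
 *	engine = crypto_engine_alloc_init_and_set(dev, true, my_do_batch,
 *						  false, MY_HW_QLEN);
 *	if (!engine)
 *		return -ENOMEM;
 *	ret = crypto_engine_start(engine);
 *	if (ret) {
 *		crypto_engine_exit(engine);
 *		return ret;
 *	}
 */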

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");