// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	lockdep_assert_in_softirq();
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If the hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request was not executed successfully by the hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If the retry mechanism is supported,
		 * unprepare the current request and
		 * enqueue it back into the crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If the hardware was unable to execute the request, enqueue
		 * it back in front of the crypto-engine queue, to keep the
		 * order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If the retry mechanism is supported, send new requests to the engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}
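
/*
 * Driver-side contract, illustrated. crypto_pump_requests() reaches the
 * per-transform callbacks through struct crypto_engine_ctx, which the
 * engine expects at the start of the tfm context (crypto_tfm_ctx()).
 * A minimal sketch of a driver wiring this up; my_prepare, my_unprepare
 * and my_do_one are hypothetical driver handlers, not part of this file:
 *
 *	struct my_tfm_ctx {
 *		struct crypto_engine_ctx enginectx;	// must be first
 *		// driver-specific state (keys, DMA buffers, ...)
 *	};
 *
 *	static int my_skcipher_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.prepare_request = my_prepare;     // optional
 *		ctx->enginectx.op.unprepare_request = my_unprepare; // optional
 *		ctx->enginectx.op.do_one_request = my_do_one;       // required
 *		return 0;
 *	}
 *
 * do_one_request() returns 0 once the request has been accepted by the
 * hardware; with retry_support, returning -ENOSPC requeues the request
 * at the head of the queue, as handled above.
 */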

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: indicates whether to queue the pump work to the kthread worker
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue and pump it
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
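
/*
 * Illustrative usage, a sketch rather than part of this file: a driver's
 * skcipher .encrypt handler typically just hands the request over and
 * returns the transfer result (-EINPROGRESS when queued, or -EBUSY for
 * a backlogged request). my_dev and my_dev_from_tfm() are hypothetical:
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct my_dev *dd = my_dev_from_tfm(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(dd->engine,
 *								  req);
 *	}
 */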

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
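
/*
 * Illustrative completion path, with hypothetical names: once the
 * hardware signals that the operation submitted by do_one_request() has
 * finished, the driver reports the result through the matching
 * crypto_finalize_*_request() helper, which unprepares the request,
 * completes it and pumps the next one. Note the lockdep assertion in
 * crypto_finalize_request(): completion is expected in softirq context,
 * e.g. a tasklet:
 *
 *	static void my_done_tasklet(unsigned long data)
 *	{
 *		struct my_dev *dd = (struct my_dev *)data;
 *		int err = my_read_status(dd);	// hypothetical status read
 *
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *	}
 */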

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * for a while until the pump has drained the queued requests.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether the hardware has support for the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware has support for the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
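
/*
 * Illustrative call, assuming a hypothetical my_do_batch() callback: a
 * driver whose hardware can hold several requests at once opts into the
 * retry mechanism and a batch hook, with a deeper software queue. Note
 * that cbk_do_batch is kept only when retry_support is true:
 *
 *	engine = crypto_engine_alloc_init_and_set(dev, true, my_do_batch,
 *						  false, 128);
 */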

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
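
/*
 * Illustrative lifecycle, a sketch with hypothetical driver names: the
 * usual probe()/remove() pairing. The engine structure is devm-allocated,
 * so crypto_engine_exit() only has to stop the queue and destroy the
 * pump kworker:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd;	// hypothetical driver state
 *		// ... allocate dd, map resources ...
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *		return crypto_engine_start(dd->engine);
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		return crypto_engine_exit(dd->engine);
 *	}
 */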

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");