/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (engine->cur_req_prepared &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* A request was dequeued; prepare the hardware if it was idle */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err;
	}
	ret = enginectx->op.do_one_request(engine, async_req);
	if (ret) {
		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, async_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}
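
/*
 * Illustrative sketch (not part of this file's API surface): a driver
 * provides the operations consumed above by embedding a
 * struct crypto_engine_ctx at the start of its transform context, since
 * crypto_pump_requests() fetches it via crypto_tfm_ctx(). The names
 * my_tfm_ctx, my_prepare, my_unprepare and my_do_one are hypothetical:
 *
 *	struct my_tfm_ctx {
 *		struct crypto_engine_ctx enginectx;	// must be first
 *	};
 *
 *	static int my_skcipher_init(struct crypto_skcipher *tfm)
 *	{
 *		struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.prepare_request = my_prepare;
 *		ctx->enginectx.op.unprepare_request = my_unprepare;
 *		ctx->enginectx.op.do_one_request = my_do_one;
 *		return 0;
 *	}
 *
 * do_one_request() may complete asynchronously; the driver then reports the
 * result later through one of the crypto_finalize_*_request() helpers below.
 */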

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * @need_pump: whether to schedule the request pump if the engine is idle
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request to the
 * engine queue and schedule the request pump
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_ablkcipher_request_to_engine - transfer one
 * ablkcipher_request to the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * TODO: Remove this function when skcipher conversion is finished
 */
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
						 struct ablkcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * to the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
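
/*
 * Usage sketch, assuming a hypothetical driver with a my_engine pointer set
 * up at probe time: an algorithm's entry point normally just hands the
 * request to the engine and propagates the enqueue status (typically
 * -EINPROGRESS, or -EBUSY for a backlogged request):
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		return crypto_transfer_skcipher_request_to_engine(my_engine,
 *								  req);
 *	}
 *
 * The pump worker later dequeues the request and invokes the transform's
 * do_one_request() callback for it.
 */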

/**
 * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 * TODO: Remove this function when skcipher conversion is finished
 */
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
					struct ablkcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
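
/*
 * Usage sketch: when the hardware signals completion (for example in the
 * driver's interrupt handler or a completion tasklet), the driver hands the
 * result back through the matching finalize helper, which runs the request's
 * completion callback and kicks the pump again. my_dev and its fields are
 * hypothetical:
 *
 *	static void my_xfer_complete(struct my_dev *dd, int err)
 *	{
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *	}
 */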

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else an error code on failure.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else an error code on failure.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need to
	 * wait for a while until the queued requests have been pumped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
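
/*
 * Lifecycle sketch, assuming a typical platform driver: the engine is
 * allocated and started in probe() and torn down in remove(). my_probe(),
 * my_remove() and the my_dev container are illustrative only:
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *		return crypto_engine_start(dd->engine);
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		return crypto_engine_exit(dd->engine);
 *	}
 *
 * crypto_engine_exit() calls crypto_engine_stop() first, so queued requests
 * get a chance to drain before the kworker is destroyed.
 */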

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");