v4.6 (crypto/crypto_engine.c)
 
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

void crypto_finalize_request(struct crypto_engine *engine,
			     struct ablkcipher_request *req, int err);

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ablkcipher_request *req;
	unsigned long flags;
	bool was_busy = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		queue_kthread_work(&engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			pr_err("failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	req = ablkcipher_request_cast(async_req);

	engine->cur_req = req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			pr_err("failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	if (engine->prepare_request) {
		ret = engine->prepare_request(engine, engine->cur_req);
		if (ret) {
			pr_err("failed to prepare request: %d\n", ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}

	ret = engine->crypt_one_request(engine, engine->cur_req);
	if (ret) {
		pr_err("failed to crypt one request from queue\n");
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, engine->cur_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 * @need_pump: if true, kick the request pump after enqueueing
 */
int crypto_transfer_request(struct crypto_engine *engine,
			    struct ablkcipher_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		queue_kthread_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_request);

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_request_to_engine(struct crypto_engine *engine,
				      struct ablkcipher_request *req)
{
	return crypto_transfer_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_request(struct crypto_engine *engine,
			     struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared && engine->unprepare_request) {
			ret = engine->unprepare_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is in a busy state,
	 * we need to wait for a while until the queued requests are processed.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		pr_warn("could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device the hardware engine is attached to
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	init_kthread_worker(&engine->kworker);
	engine->kworker_task = kthread_run(kthread_worker_fn,
					   &engine->kworker, "%s",
					   engine->name);
	if (IS_ERR(engine->kworker_task)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	init_kthread_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	flush_kthread_worker(&engine->kworker);
	kthread_stop(engine->kworker_task);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
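
As a usage illustration for the v4.6-era API: a driver stores its callbacks directly on struct crypto_engine and works only with ablkcipher_request. Below is a minimal, hypothetical sketch; the my_* names and the my_hw_submit() hardware helper are invented for illustration, and a real driver would also set prepare_request/unprepare_request and the prepare/unprepare_crypt_hardware hooks as needed.

#include <linux/crypto.h>
#include <linux/platform_device.h>
#include <crypto/engine.h>

static struct crypto_engine *my_engine;

/* Hypothetical helper that programs the hardware; assumed to exist. */
int my_hw_submit(struct ablkcipher_request *req);

static int my_crypt_one_request(struct crypto_engine *engine,
				struct ablkcipher_request *req)
{
	/*
	 * Kick the hardware and return; the completion path (e.g. an
	 * IRQ handler) is expected to call crypto_finalize_request()
	 * once the request is done.
	 */
	return my_hw_submit(req);
}

static int my_probe(struct platform_device *pdev)
{
	my_engine = crypto_engine_alloc_init(&pdev->dev, true);
	if (!my_engine)
		return -ENOMEM;

	my_engine->crypt_one_request = my_crypt_one_request;

	return crypto_engine_start(my_engine);
}

/* The ablkcipher .encrypt handler only queues the request: */
static int my_encrypt(struct ablkcipher_request *req)
{
	return crypto_transfer_request_to_engine(my_engine, req);
}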
v5.4 (crypto/crypto_engine.c)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (engine->cur_req_prepared &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err;
	}
	ret = enginectx->op.do_one_request(engine, async_req);
	if (ret) {
		dev_err(engine->dev, "failed to do one request from queue: %d\n", ret);
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, async_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 * @need_pump: if true, kick the request pump after enqueueing
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_ablkcipher_request_to_engine - transfer one
 * ablkcipher_request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 * TODO: Remove this function when skcipher conversion is finished
 */
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
						 struct ablkcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be queued on the engine
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

/**
 * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 * TODO: Remove this function when skcipher conversion is finished
 */
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
					struct ablkcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is in a busy state,
	 * we need to wait for a while until the queued requests are processed.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device the hardware engine is attached to
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
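
By v5.4 the per-request callbacks have moved off struct crypto_engine into a per-algorithm struct crypto_engine_ctx, which the framework fetches with crypto_tfm_ctx(req->tfm), so it must be the first member of the driver's tfm context; each request type also gets its own typed transfer/finalize wrapper. Below is a minimal, hypothetical skcipher sketch under the same assumptions as before (invented my_* names, assumed my_hw_submit() helper):

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

static struct crypto_engine *my_engine;

/* Hypothetical helper that programs the hardware; assumed to exist. */
int my_hw_submit(struct skcipher_request *req);

struct my_tfm_ctx {
	struct crypto_engine_ctx enginectx;	/* must be first */
	/* driver-private keys/state would follow */
};

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	/*
	 * Kick the hardware and return; the completion path is expected
	 * to call crypto_finalize_skcipher_request() when done.
	 */
	return my_hw_submit(req);
}

static int my_init_tfm(struct crypto_skcipher *tfm)
{
	struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->enginectx.op.do_one_request = my_do_one_request;
	return 0;
}

/* The skcipher .encrypt handler only queues the request: */
static int my_encrypt(struct skcipher_request *req)
{
	return crypto_transfer_skcipher_request_to_engine(my_engine, req);
}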