v4.6
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

void crypto_finalize_request(struct crypto_engine *engine,
			     struct ablkcipher_request *req, int err);

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ablkcipher_request *req;
	unsigned long flags;
	bool was_busy = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		queue_kthread_work(&engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			pr_err("failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	req = ablkcipher_request_cast(async_req);

	engine->cur_req = req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			pr_err("failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	if (engine->prepare_request) {
		ret = engine->prepare_request(engine, engine->cur_req);
		if (ret) {
			pr_err("failed to prepare request: %d\n", ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}

	ret = engine->crypt_one_request(engine, engine->cur_req);
	if (ret) {
		pr_err("failed to crypt one request from queue\n");
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, engine->cur_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * @need_pump: if true, kick the request pump when the engine is not busy
 */
int crypto_transfer_request(struct crypto_engine *engine,
			    struct ablkcipher_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		queue_kthread_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_request);

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_request_to_engine(struct crypto_engine *engine,
				      struct ablkcipher_request *req)
{
	return crypto_transfer_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
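
A minimal sketch of the submission side of this v4.6 API. The struct mydev_priv, struct mydev_tfm_ctx and mydev_encrypt() names are hypothetical driver glue, not part of this file; only crypto_transfer_request_to_engine() comes from the code above.

struct mydev_priv {
	struct crypto_engine *engine;	/* from crypto_engine_alloc_init() */
	struct ablkcipher_request *cur_req;	/* request now on the hardware */
};

struct mydev_tfm_ctx {
	struct mydev_priv *priv;	/* typically set in the tfm's cra_init */
};

/* .encrypt handler: just queue the request and return; the engine pump
 * later calls the driver's crypt_one_request callback from its kthread. */
static int mydev_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct mydev_tfm_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* Normally returns -EINPROGRESS; -EBUSY if the queue is full,
	 * -ESHUTDOWN if the engine is not running. */
	return crypto_transfer_request_to_engine(ctx->priv->engine, req);
}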

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_request(struct crypto_engine *engine,
			     struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared && engine->unprepare_request) {
			ret = engine->unprepare_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_request);
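
The completion side, sketched for the same hypothetical driver: once the hardware signals that the current request is done, the interrupt handler reports the result through crypto_finalize_request(), which completes the request and re-kicks the pump. mydev_irq() and mydev_read_status() are assumptions, and priv->cur_req is assumed to have been saved by the driver's crypt_one_request callback.

static irqreturn_t mydev_irq(int irq, void *data)
{
	struct mydev_priv *priv = data;
	struct ablkcipher_request *req = priv->cur_req;
	int err = mydev_read_status(priv);	/* hypothetical status read */

	/* Unprepares the request (if prepared), calls req->base.complete()
	 * and queues the pump to fetch the next request. */
	crypto_finalize_request(priv->engine, req, err);

	return IRQ_HANDLED;
}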

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return 0 on success, or an error code on failure.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return 0 on success, or an error code on failure.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is in a busy state,
	 * we need to wait for a while to pump the requests of the engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		pr_warn("could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
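
As a usage sketch, start/stop pair naturally with power management: crypto_engine_stop() waits up to roughly ten seconds (500 iterations of 20 ms) for the queue to drain before giving up with -EBUSY. The mydev_* wrappers are illustrative.

static int mydev_suspend(struct device *dev)
{
	struct mydev_priv *priv = dev_get_drvdata(dev);

	/* Fails with -EBUSY if requests are still queued or in flight. */
	return crypto_engine_stop(priv->engine);
}

static int mydev_resume(struct device *dev)
{
	struct mydev_priv *priv = dev_get_drvdata(dev);

	return crypto_engine_start(priv->engine);
}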

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	init_kthread_worker(&engine->kworker);
	engine->kworker_task = kthread_run(kthread_worker_fn,
					   &engine->kworker, "%s",
					   engine->name);
	if (IS_ERR(engine->kworker_task)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	init_kthread_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
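
A probe-time sketch for this v4.6 API, where the per-request callbacks live directly on struct crypto_engine. mydev_probe() and the mydev_* callbacks are hypothetical, and error handling is trimmed to the essentials.

static int mydev_probe(struct platform_device *pdev)
{
	struct mydev_priv *priv;
	int ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->engine = crypto_engine_alloc_init(&pdev->dev, true);
	if (!priv->engine)
		return -ENOMEM;

	/* Hook up the driver's hardware handlers on the engine itself. */
	priv->engine->prepare_crypt_hardware = mydev_prepare_hw;
	priv->engine->unprepare_crypt_hardware = mydev_unprepare_hw;
	priv->engine->crypt_one_request = mydev_crypt_one_request;

	ret = crypto_engine_start(priv->engine);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;
}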

/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	flush_kthread_worker(&engine->kworker);
	kthread_stop(engine->kworker_task);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
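
And the matching teardown, again a sketch: crypto_engine_exit() refuses to tear down while requests are pending, so the driver can simply propagate its return value.

static int mydev_remove(struct platform_device *pdev)
{
	struct mydev_priv *priv = platform_get_drvdata(pdev);

	/* Stops the engine, then flushes and stops the pump kthread. */
	return crypto_engine_exit(priv->engine);
}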

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
v4.17
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (engine->cur_req_prepared &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err;
	}
	ret = enginectx->op.do_one_request(engine, async_req);
	if (ret) {
		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, async_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * @need_pump: if true, kick the request pump when the engine is not busy
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_ablkcipher_request_to_engine - transfer one
 * ablkcipher_request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * TODO: Remove this function when skcipher conversion is finished
 */
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
						 struct ablkcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
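
From a driver the v4.17 submission path looks much the same, just typed per request kind. A sketch with hypothetical mydev_* names; the tfm context layout that makes this work is sketched after crypto_engine_alloc_init() below.

static int mydev_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct mydev_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* The pump later finds the driver's ops via the crypto_engine_ctx
	 * placed at the start of the tfm context. */
	return crypto_transfer_skcipher_request_to_engine(ctx->priv->engine,
							  req);
}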

/**
 * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 * TODO: Remove this function when skcipher conversion is finished
 */
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
					struct ablkcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
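
A sketch of the asynchronous round trip in the v4.17 model: do_one_request() starts the hardware and returns, and the interrupt handler finalizes. mydev_start_hw() and mydev_read_status() are assumptions, as is the cur_req field in mydev_priv.

/* Called from the pump kthread with the dequeued async request. */
static int mydev_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);
	struct mydev_tfm_ctx *ctx =
		crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	struct mydev_priv *priv = ctx->priv;

	priv->cur_req = req;
	mydev_start_hw(priv, req);	/* kick off the DMA/accelerator */
	return 0;			/* completion arrives via IRQ */
}

static irqreturn_t mydev_irq(int irq, void *data)
{
	struct mydev_priv *priv = data;
	int err = mydev_read_status(priv);

	/* Unprepares (if needed), completes req->base, re-kicks the pump. */
	crypto_finalize_skcipher_request(priv->engine, priv->cur_req, err);
	return IRQ_HANDLED;
}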

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine to be started
 *
 * Return 0 on success, or an error code on failure.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine to be stopped
 *
 * Return 0 on success, or an error code on failure.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is in a busy state,
	 * we need to wait for a while to pump the requests of the engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
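
In v4.17 the per-request ops moved from the engine onto the transform: crypto_pump_requests() calls crypto_tfm_ctx(req->tfm) and interprets the start of the context as a struct crypto_engine_ctx. A sketch of the wiring, with mydev_* names hypothetical:

struct mydev_tfm_ctx {
	struct crypto_engine_ctx enginectx;	/* must come first */
	struct mydev_priv *priv;
};

static int mydev_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct mydev_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = mydev_do_one_request;
	return 0;
}

Engine allocation itself is unchanged from the v4.6 pattern:

static int mydev_probe(struct platform_device *pdev)
{
	struct mydev_priv *priv;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->engine = crypto_engine_alloc_init(&pdev->dev, true);
	if (!priv->engine)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return crypto_engine_start(priv->engine);
}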

/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
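
Teardown sketch for the v4.17 variant; kthread_destroy_worker() already flushes and stops the worker, so the driver side stays a one-liner:

static int mydev_remove(struct platform_device *pdev)
{
	struct mydev_priv *priv = platform_get_drvdata(pdev);

	return crypto_engine_exit(priv->engine);
}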

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");