v5.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
			     struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (engine->cur_req_prepared &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* By this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err;
	}
	ret = enginectx->op.do_one_request(engine, async_req);
	if (ret) {
		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, async_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}
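
The pump above drives three per-transformation callbacks, prepare_request, do_one_request and unprepare_request, which it finds by treating the start of the tfm context as a struct crypto_engine_ctx. The block below is an illustrative sketch, not part of crypto_engine.c, of how a driver could wire these up; the my_* names are hypothetical.

/*
 * Illustrative only: driver-side tfm context. The engine ctx must come
 * first so that crypto_tfm_ctx(), as used by crypto_pump_requests(),
 * lands on it.
 */
struct my_tfm_ctx {
	struct crypto_engine_ctx enginectx;
	/* ... key material, DMA state, etc. (hypothetical) ... */
};

static int my_prepare(struct crypto_engine *engine, void *areq)
{
	return 0;	/* e.g. DMA-map buffers (hypothetical) */
}

static int my_unprepare(struct crypto_engine *engine, void *areq)
{
	return 0;	/* undo my_prepare() (hypothetical) */
}

static int my_run(struct crypto_engine *engine, void *areq)
{
	return 0;	/* submit the request to hardware (hypothetical) */
}

static int my_cra_init(struct crypto_tfm *tfm)
{
	struct my_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->enginectx.op.prepare_request = my_prepare;
	ctx->enginectx.op.unprepare_request = my_unprepare;
	ctx->enginectx.op.do_one_request = my_run;
	return 0;
}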

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_ablkcipher_request_to_engine - transfer one
 * ablkcipher_request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 * TODO: Remove this function when skcipher conversion is finished
 */
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
						 struct ablkcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
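
A driver's algorithm entry points normally just hand the request to the engine with one of these transfer helpers and return the result (typically -EINPROGRESS from the underlying crypto queue). A hedged sketch, not part of this file, assuming a hypothetical my_dev holding the engine pointer and a hypothetical my_dev_from_tfm() lookup:

struct my_dev {
	struct crypto_engine *engine;	/* hypothetical driver state */
};

static int my_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct my_dev *dd = my_dev_from_tfm(tfm);	/* hypothetical lookup */

	/* Queue it; the pump kthread will later call do_one_request() */
	return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
}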

/**
 * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 * TODO: Remove this function when skcipher conversion is finished
 */
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
					struct ablkcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
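
The finalize helpers are the completion half of the API: once the hardware signals that an operation finished, the driver reports the result so the engine can clear cur_req, complete the request and pump the next one. A sketch of a completion handler, reusing the hypothetical my_dev from the previous sketch (my_hw_status() and dd->req are also assumptions):

static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dd = data;
	int err = my_hw_status(dd) ? -EIO : 0;	/* hypothetical status read */

	/* Completes dd->req and kicks the pump for the next request */
	crypto_finalize_skcipher_request(dd->engine, dd->req, err);
	return IRQ_HANDLED;
}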

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is in a busy state,
	 * we need to wait for a while until the queued requests are pumped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
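
Taken together, a probe routine would allocate and start the engine, and the matching remove path would call crypto_engine_exit(). A minimal sketch, assuming the hypothetical my_dev above (error handling trimmed; the engine itself is devm-allocated, so there is no explicit free):

static int my_probe(struct platform_device *pdev)
{
	struct my_dev *dd;

	dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	dd->engine = crypto_engine_alloc_init(&pdev->dev, true /* rt */);
	if (!dd->engine)
		return -ENOMEM;

	return crypto_engine_start(dd->engine);
}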

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
v5.14.15
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and if so calls out to the driver to initialize hardware
 * and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If the hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* By this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If the retry mechanism is supported,
		 * unprepare the current request and
		 * enqueue it back into the crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If the hardware was unable to execute the request, enqueue
		 * it back in front of the crypto-engine queue, to keep the
		 * order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If the retry mechanism is supported, send new requests to the engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}
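
Relative to v5.4, this version adds the retry path (-ENOSPC requeues the request at the head of the queue) and an optional batch hook: once the pump has drained what it can, do_batch_requests() lets hardware that accumulates descriptors launch them in one go. A sketch of such a callback, with my_dev_from_engine() and my_hw_kick_ring() as assumed driver helpers, not part of this API:

static int my_do_batch(struct crypto_engine *engine)
{
	struct my_dev *dd = my_dev_from_engine(engine);	/* hypothetical */

	/* Start executing every descriptor queued since the last kick */
	return my_hw_kick_ring(dd);	/* hypothetical doorbell write */
}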

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is in a busy state,
	 * we need to wait for a while until the queued requests are pumped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to one hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware has support for the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
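
A driver for hardware that can queue several requests would then pass its batch callback and a deeper queue at allocation time. A hedged sketch, reusing the hypothetical my_do_batch and dd from the earlier sketches (MY_QLEN is an assumption, not a framework constant):

#define MY_QLEN 64	/* hypothetical hardware ring depth */

dd->engine = crypto_engine_alloc_init_and_set(&pdev->dev,
					      true,		/* retry_support */
					      my_do_batch,	/* cbk_do_batch */
					      false,		/* rt */
					      MY_QLEN);
if (!dd->engine)
	return -ENOMEM;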

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");