v4.6
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

void crypto_finalize_request(struct crypto_engine *engine,
			     struct ablkcipher_request *req, int err);

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct ablkcipher_request *req;
	unsigned long flags;
	bool was_busy = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		queue_kthread_work(&engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			queue_kthread_work(&engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			pr_err("failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	req = ablkcipher_request_cast(async_req);

	engine->cur_req = req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			pr_err("failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	if (engine->prepare_request) {
		ret = engine->prepare_request(engine, engine->cur_req);
		if (ret) {
			pr_err("failed to prepare request: %d\n", ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}

	ret = engine->crypt_one_request(engine, engine->cur_req);
	if (ret) {
		pr_err("failed to crypt one request from queue\n");
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, engine->cur_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: true to kick the pump thread if the engine is not busy
 */
int crypto_transfer_request(struct crypto_engine *engine,
			    struct ablkcipher_request *req, bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = ablkcipher_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		queue_kthread_work(&engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_transfer_request);

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_request_to_engine(struct crypto_engine *engine,
				      struct ablkcipher_request *req)
{
	return crypto_transfer_request(engine, req, true);
}
EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
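
/*
 * A minimal usage sketch, not part of this file: a hypothetical driver's
 * ablkcipher .encrypt hook handing the request off to the engine queue.
 * struct my_dev, its ->engine member and my_dev_from_tfm() are invented
 * names for illustration only.
 */
static int my_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct my_dev *dd = my_dev_from_tfm(req->base.tfm);

	/* Enqueue and, if the engine is idle, kick the pump thread */
	return crypto_transfer_request_to_engine(dd->engine, req);
}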

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_request(struct crypto_engine *engine,
			     struct ablkcipher_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		if (engine->cur_req_prepared && engine->unprepare_request) {
			ret = engine->unprepare_request(engine, req);
			if (ret)
				pr_err("failed to unprepare request\n");
		}

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->base.complete(&req->base, err);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);
}
EXPORT_SYMBOL_GPL(crypto_finalize_request);
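
/*
 * A minimal completion sketch, not part of this file: once the hardware
 * signals completion (e.g. from the driver's interrupt handler or a
 * tasklet), the driver hands the result back to the engine, which
 * unprepares the request, completes it and re-kicks the pump. my_dev,
 * my_read_status() and the dd->req bookkeeping are hypothetical.
 */
static void my_done_tasklet(unsigned long data)
{
	struct my_dev *dd = (struct my_dev *)data;
	int err = my_read_status(dd);	/* 0 on success, else e.g. -EIO */

	crypto_finalize_request(dd->engine, dd->req, err);
}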

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return: 0 on success, else error code on failure.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	queue_kthread_work(&engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return: 0 on success, else error code on failure.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * for a while so the pump thread can drain the engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		pr_warn("could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	init_kthread_worker(&engine->kworker);
	engine->kworker_task = kthread_run(kthread_worker_fn,
					   &engine->kworker, "%s",
					   engine->name);
	if (IS_ERR(engine->kworker_task)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	init_kthread_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return: 0 on success, else error code on failure.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	flush_kthread_worker(&engine->kworker);
	kthread_stop(engine->kworker_task);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
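
/*
 * A hedged lifecycle sketch for the v4.6 API, not part of this file: the
 * engine callbacks live directly on struct crypto_engine, and the engine
 * is allocated, started and torn down from a driver's probe/remove paths.
 * All my_* identifiers are hypothetical.
 */
static int my_probe(struct platform_device *pdev)
{
	struct my_dev *dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);

	if (!dd)
		return -ENOMEM;

	/* rt = true runs the pump kthread with SCHED_FIFO priority */
	dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
	if (!dd->engine)
		return -ENOMEM;

	dd->engine->prepare_crypt_hardware = my_prepare_hw;
	dd->engine->unprepare_crypt_hardware = my_unprepare_hw;
	dd->engine->prepare_request = my_prepare_req;
	dd->engine->unprepare_request = my_unprepare_req;
	dd->engine->crypt_one_request = my_crypt_one_req;

	platform_set_drvdata(pdev, dd);

	/* Mark the engine running and kick the pump */
	return crypto_engine_start(dd->engine);
}

static int my_remove(struct platform_device *pdev)
{
	struct my_dev *dd = platform_get_drvdata(pdev);

	/* Waits for the queue to drain, then stops the pump kthread */
	return crypto_engine_exit(dd->engine);
}
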
v5.14.15
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If the hardware cannot enqueue more requests and the retry
	 * mechanism is not supported, make sure we are completing the
	 * current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If hardware queue is full (-ENOSPC), requeue request
		 * regardless of backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If retry mechanism is supported,
		 * unprepare current request and
		 * enqueue it back into crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If hardware was unable to execute request, enqueue it
		 * back in front of crypto-engine queue, to keep the order
		 * of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: true to kick the pump thread if the engine is not busy
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
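
/*
 * A minimal usage sketch, not part of this file: on this API each request
 * type has its own typed transfer helper. A hypothetical driver's skcipher
 * .encrypt hook only queues the request; the actual work happens later in
 * the tfm's do_one_request() callback. my_dev and my_dev_from_tfm() are
 * invented names.
 */
static int my_skcipher_encrypt(struct skcipher_request *req)
{
	struct my_dev *dd = my_dev_from_tfm(crypto_skcipher_reqtfm(req));

	return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
}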

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
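
/*
 * A minimal completion sketch, not part of this file: when the hardware is
 * done, the driver finalizes through the typed helper, which funnels into
 * crypto_finalize_request() above. my_dev, my_read_status() and the
 * dd->req bookkeeping are hypothetical.
 */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dd = data;
	int err = my_read_status(dd);	/* 0 on success, else e.g. -EIO */

	crypto_finalize_skcipher_request(dd->engine, dd->req, err);
	return IRQ_HANDLED;
}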

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return: 0 on success, else error code on failure.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return: 0 on success, else error code on failure.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * for a while so the pump thread can drain the engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware supports the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return: 0 on success, else error code on failure.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
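
/*
 * A hedged lifecycle sketch for this API, not part of this file: the
 * per-request callbacks moved into struct crypto_engine_ctx, which the
 * engine fetches with crypto_tfm_ctx(), so it must be the first member of
 * the driver's tfm context. All my_* identifiers are hypothetical.
 */
struct my_tfm_ctx {
	struct crypto_engine_ctx enginectx;	/* must stay first */
	/* driver-private per-tfm state follows */
};

static int my_skcipher_init(struct crypto_skcipher *tfm)
{
	struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->enginectx.op.prepare_request = my_prepare_req;
	ctx->enginectx.op.unprepare_request = my_unprepare_req;
	ctx->enginectx.op.do_one_request = my_do_one_req;
	return 0;
}

static int my_probe(struct platform_device *pdev)
{
	struct my_dev *dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);

	if (!dd)
		return -ENOMEM;

	/*
	 * retry_support = true lets do_one_request() return -ENOSPC to
	 * requeue a request, and enables the optional my_do_batch batching
	 * callback; 128 is an arbitrary example queue length.
	 */
	dd->engine = crypto_engine_alloc_init_and_set(&pdev->dev, true,
						      my_do_batch, false, 128);
	if (!dd->engine)
		return -ENOMEM;

	return crypto_engine_start(dd->engine);
}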