v4.6
 
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};

#define CCP_CRYPTO_MAX_QLEN	100

static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;

struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};

static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 *   Because cmds can be executed from any point in the cmd list
	 *   special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}

static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}

static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
			goto e_lock;
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
		if ((ret == -EBUSY) &&
		    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
			goto e_lock;	/* Not backlogging, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}

struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	if (WARN_ON(!sg))
		return NULL;

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	if (WARN_ON(sg_add))
		return NULL;

	return sg_last;
}

static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&cipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&cipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	return 0;
}

static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}
}

static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret)
		return ret;

	spin_lock_init(&req_queue_lock);
	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
	if (ret)
		ccp_unregister_algs();

	return ret;
}

static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);
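
The enqueue path above is driven by the per-algorithm front-ends (ccp-crypto-aes.c, ccp-crypto-sha.c and friends), which build a struct ccp_cmd in their request context and hand it to ccp_crypto_enqueue_request(). A minimal sketch of that call pattern follows; ccp_sketch_req_ctx and ccp_sketch_crypt() are hypothetical stand-ins, and the real front-ends fill in far more of the command (engine selection, key, source/destination scatterlists) before queueing it.

/* Hedged sketch, not part of this file: how a front-end might hand a
 * request to ccp_crypto_enqueue_request().  The request-context layout
 * and helper below are hypothetical.
 */
struct ccp_sketch_req_ctx {
	struct ccp_cmd cmd;		/* command submitted to the CCP */
};

static int ccp_sketch_crypt(struct crypto_async_request *async_req,
			    struct ccp_sketch_req_ctx *rctx)
{
	/* Describe the operation: engine, key, src/dst scatterlists, ...
	 * (omitted here; see the real ccp-crypto-*.c front-ends).
	 */
	memset(&rctx->cmd, 0, sizeof(rctx->cmd));

	/* ccp_crypto_enqueue_request() records async_req->tfm, installs
	 * ccp_crypto_complete() as the command callback, translates
	 * CRYPTO_TFM_REQ_MAY_BACKLOG into CCP_CMD_MAY_BACKLOG, and then
	 * serializes the command behind any other command queued for the
	 * same tfm.  It typically returns -EINPROGRESS or -EBUSY.
	 */
	return ccp_crypto_enqueue_request(async_req, &rctx->cmd);
}

Because ccp_crypto_enqueue_cmd() only submits a command when no other command for the same tfm is queued, requests on one transform complete strictly in order, while requests on different transforms can still run in parallel on the CCP's queues.
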
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(skcipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};

#define CCP_CRYPTO_MAX_QLEN	100

static struct ccp_crypto_queue req_queue;
static DEFINE_SPINLOCK(req_queue_lock);

struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 *   Because cmds can be executed from any point in the cmd list
	 *   special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}

static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx_dma(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx_dma(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}

static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
			ret = -ENOSPC;
			goto e_lock;
		}
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}

struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	if (WARN_ON(!sg))
		return NULL;

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	if (WARN_ON(sg_add))
		return NULL;

	return sg_last;
}

static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_aeads(&aead_algs);
		if (ret)
			return ret;
	}

	if (!des3_disable) {
		ret = ccp_register_des3_algs(&skcipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	if (!rsa_disable) {
		ret = ccp_register_rsa_algs(&akcipher_algs);
		if (ret)
			return ret;
	}

	return 0;
}

static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp;
	struct ccp_crypto_aead *aead_alg, *aead_tmp;
	struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}

	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
		crypto_unregister_aead(&aead_alg->alg);
		list_del(&aead_alg->entry);
		kfree(aead_alg);
	}

	list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
		crypto_unregister_akcipher(&akc_alg->alg);
		list_del(&akc_alg->entry);
		kfree(akc_alg);
	}
}

static int __init ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret) {
		pr_err("Cannot load: there are no available CCPs\n");
		return ret;
	}

	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
	if (ret)
		ccp_unregister_algs();

	return ret;
}

static void __exit ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);
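
ccp_crypto_sg_table_add() above is used by the hashing front-ends to splice a buffered partial block ahead of the caller's data before handing both to the CCP. A hedged sketch of that pattern follows; ccp_sketch_build_sg(), buf_sg and req_src are hypothetical stand-ins, and only the standard scatterlist helpers (sg_alloc_table(), sg_nents(), sg_mark_end(), sg_free_table()) are assumed.

/* Hedged sketch, not part of this file: combining two scatterlists into one
 * table with ccp_crypto_sg_table_add().  buf_sg stands in for a buffered
 * partial block and req_src for the caller-supplied data.
 */
static struct scatterlist *ccp_sketch_build_sg(struct sg_table *table,
					       struct scatterlist *buf_sg,
					       struct scatterlist *req_src,
					       gfp_t gfp)
{
	struct scatterlist *sg;

	/* Reserve one table entry per element of both source lists */
	if (sg_alloc_table(table, sg_nents(buf_sg) + sg_nents(req_src), gfp))
		return NULL;

	/* Copy the buffered block first, then the request data, so the CCP
	 * sees the bytes in the order the caller supplied them.
	 */
	sg = ccp_crypto_sg_table_add(table, buf_sg);
	if (!sg)
		goto err;
	sg = ccp_crypto_sg_table_add(table, req_src);
	if (!sg)
		goto err;

	sg_mark_end(sg);		/* terminate the combined list */
	return table->sgl;

err:
	sg_free_table(table);
	return NULL;
}
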