v5.9: drivers/crypto/stm32/stm32-crc32.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) STMicroelectronics SA 2017
  4 * Author: Fabien Dessenne <fabien.dessenne@st.com>
  5 */
  6
  7#include <linux/bitrev.h>
  8#include <linux/clk.h>
  9#include <linux/crc32poly.h>
 10#include <linux/module.h>
 11#include <linux/mod_devicetable.h>
 12#include <linux/platform_device.h>
 13#include <linux/pm_runtime.h>
 14
 15#include <crypto/internal/hash.h>
 16
 17#include <asm/unaligned.h>
 18
 19#define DRIVER_NAME             "stm32-crc32"
 20#define CHKSUM_DIGEST_SIZE      4
 21#define CHKSUM_BLOCK_SIZE       1
 22
 23/* Registers */
 24#define CRC_DR                  0x00000000
 25#define CRC_CR                  0x00000008
 26#define CRC_INIT                0x00000010
 27#define CRC_POL                 0x00000014
 28
 29/* Register values */
 30#define CRC_CR_RESET            BIT(0)
 31#define CRC_CR_REV_IN_WORD      (BIT(6) | BIT(5))
 32#define CRC_CR_REV_IN_BYTE      BIT(5)
 33#define CRC_CR_REV_OUT          BIT(7)
 34#define CRC32C_INIT_DEFAULT     0xFFFFFFFF
 35
 36#define CRC_AUTOSUSPEND_DELAY	50
 37
 38static unsigned int burst_size;
 39module_param(burst_size, uint, 0644);
 40MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)");
 41
 42struct stm32_crc {
 43	struct list_head list;
 44	struct device    *dev;
 45	void __iomem     *regs;
 46	struct clk       *clk;
 47	spinlock_t       lock;
 48};
 49
 50struct stm32_crc_list {
 51	struct list_head dev_list;
 52	spinlock_t       lock; /* protect dev_list */
 53};
 54
 55static struct stm32_crc_list crc_list = {
 56	.dev_list = LIST_HEAD_INIT(crc_list.dev_list),
 57	.lock     = __SPIN_LOCK_UNLOCKED(crc_list.lock),
 58};
 59
 60struct stm32_crc_ctx {
 61	u32 key;
 62	u32 poly;
 63};
 64
 65struct stm32_crc_desc_ctx {
 66	u32    partial; /* crc32c: partial in first 4 bytes of that struct */
 67};
 68
 69static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
 70{
 71	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
 72
 73	mctx->key = 0;
 74	mctx->poly = CRC32_POLY_LE;
 75	return 0;
 76}
 77
 78static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
 79{
 80	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
 81
 82	mctx->key = CRC32C_INIT_DEFAULT;
 83	mctx->poly = CRC32C_POLY_LE;
 84	return 0;
 85}
 86
 87static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
 88			    unsigned int keylen)
 89{
 90	struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
 91
 92	if (keylen != sizeof(u32))
 93		return -EINVAL;
 94
 95	mctx->key = get_unaligned_le32(key);
 96	return 0;
 97}
 98
 99static struct stm32_crc *stm32_crc_get_next_crc(void)
100{
101	struct stm32_crc *crc;
102
103	spin_lock_bh(&crc_list.lock);
104	crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list);
105	if (crc)
106		list_move_tail(&crc->list, &crc_list.dev_list);
107	spin_unlock_bh(&crc_list.lock);
108
109	return crc;
110}
111
112static int stm32_crc_init(struct shash_desc *desc)
113{
114	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
115	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
116	struct stm32_crc *crc;
117	unsigned long flags;
118
119	crc = stm32_crc_get_next_crc();
120	if (!crc)
121		return -ENODEV;
122
123	pm_runtime_get_sync(crc->dev);
124
125	spin_lock_irqsave(&crc->lock, flags);
126
127	/* Reset, set key, poly and configure in bit reverse mode */
128	writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT);
129	writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
130	writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
131		       crc->regs + CRC_CR);
132
133	/* Store partial result */
134	ctx->partial = readl_relaxed(crc->regs + CRC_DR);
135
136	spin_unlock_irqrestore(&crc->lock, flags);
137
138	pm_runtime_mark_last_busy(crc->dev);
139	pm_runtime_put_autosuspend(crc->dev);
140
141	return 0;
142}
143
144static int burst_update(struct shash_desc *desc, const u8 *d8,
145			size_t length)
146{
147	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
148	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
149	struct stm32_crc *crc;
150	unsigned long flags;
151
152	crc = stm32_crc_get_next_crc();
153	if (!crc)
154		return -ENODEV;
155
156	pm_runtime_get_sync(crc->dev);
157
158	spin_lock_irqsave(&crc->lock, flags);
159
160	/*
161	 * Restore previously calculated CRC for this context as init value
162	 * Restore polynomial configuration
163	 * Configure in register for word input data,
164	 * Configure out register in reversed bit mode data.
165	 */
166	writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT);
167	writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL);
168	writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
169		       crc->regs + CRC_CR);
170
171	if (d8 != PTR_ALIGN(d8, sizeof(u32))) {
172		/* Configure for byte data */
173		writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
174			       crc->regs + CRC_CR);
175		while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) {
176			writeb_relaxed(*d8++, crc->regs + CRC_DR);
177			length--;
178		}
179		/* Configure for word data */
180		writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT,
181			       crc->regs + CRC_CR);
182	}
183
184	for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32))
185		writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR);
186
187	if (length) {
188		/* Configure for byte data */
189		writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT,
190			       crc->regs + CRC_CR);
191		while (length--)
192			writeb_relaxed(*d8++, crc->regs + CRC_DR);
193	}
194
195	/* Store partial result */
196	ctx->partial = readl_relaxed(crc->regs + CRC_DR);
197
198	spin_unlock_irqrestore(&crc->lock, flags);
199
200	pm_runtime_mark_last_busy(crc->dev);
201	pm_runtime_put_autosuspend(crc->dev);
202
203	return 0;
204}
205
206static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
207			    unsigned int length)
208{
209	const unsigned int burst_sz = burst_size;
210	unsigned int rem_sz;
211	const u8 *cur;
212	size_t size;
213	int ret;
214
215	if (!burst_sz)
216		return burst_update(desc, d8, length);
217
218	/* Digest first bytes not 32bit aligned at first pass in the loop */
219	size = min(length,
220		   burst_sz + (unsigned int)d8 - ALIGN_DOWN((unsigned int)d8,
221							    sizeof(u32)));
222	for (rem_sz = length, cur = d8; rem_sz;
223	     rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) {
224		ret = burst_update(desc, cur, size);
225		if (ret)
226			return ret;
227	}
228
229	return 0;
230}
231
232static int stm32_crc_final(struct shash_desc *desc, u8 *out)
233{
234	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
235	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
236
237	/* Send computed CRC */
238	put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
239			   ~ctx->partial : ctx->partial, out);
240
241	return 0;
242}
243
244static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
245			   unsigned int length, u8 *out)
246{
247	return stm32_crc_update(desc, data, length) ?:
248	       stm32_crc_final(desc, out);
249}
250
251static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
252			    unsigned int length, u8 *out)
253{
254	return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
255}
256
257static unsigned int refcnt;
258static DEFINE_MUTEX(refcnt_lock);
259static struct shash_alg algs[] = {
260	/* CRC-32 */
261	{
262		.setkey         = stm32_crc_setkey,
263		.init           = stm32_crc_init,
264		.update         = stm32_crc_update,
265		.final          = stm32_crc_final,
266		.finup          = stm32_crc_finup,
267		.digest         = stm32_crc_digest,
268		.descsize       = sizeof(struct stm32_crc_desc_ctx),
269		.digestsize     = CHKSUM_DIGEST_SIZE,
270		.base           = {
271			.cra_name               = "crc32",
272			.cra_driver_name        = DRIVER_NAME,
273			.cra_priority           = 200,
274			.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
275			.cra_blocksize          = CHKSUM_BLOCK_SIZE,
276			.cra_alignmask          = 3,
277			.cra_ctxsize            = sizeof(struct stm32_crc_ctx),
278			.cra_module             = THIS_MODULE,
279			.cra_init               = stm32_crc32_cra_init,
280		}
281	},
282	/* CRC-32 Castagnoli */
283	{
284		.setkey         = stm32_crc_setkey,
285		.init           = stm32_crc_init,
286		.update         = stm32_crc_update,
287		.final          = stm32_crc_final,
288		.finup          = stm32_crc_finup,
289		.digest         = stm32_crc_digest,
290		.descsize       = sizeof(struct stm32_crc_desc_ctx),
291		.digestsize     = CHKSUM_DIGEST_SIZE,
292		.base           = {
293			.cra_name               = "crc32c",
294			.cra_driver_name        = DRIVER_NAME,
295			.cra_priority           = 200,
296			.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
297			.cra_blocksize          = CHKSUM_BLOCK_SIZE,
298			.cra_alignmask          = 3,
299			.cra_ctxsize            = sizeof(struct stm32_crc_ctx),
300			.cra_module             = THIS_MODULE,
301			.cra_init               = stm32_crc32c_cra_init,
302		}
303	}
304};
305
306static int stm32_crc_probe(struct platform_device *pdev)
307{
308	struct device *dev = &pdev->dev;
309	struct stm32_crc *crc;
310	int ret;
311
312	crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
313	if (!crc)
314		return -ENOMEM;
315
316	crc->dev = dev;
317
318	crc->regs = devm_platform_ioremap_resource(pdev, 0);
319	if (IS_ERR(crc->regs)) {
320		dev_err(dev, "Cannot map CRC IO\n");
321		return PTR_ERR(crc->regs);
322	}
323
324	crc->clk = devm_clk_get(dev, NULL);
325	if (IS_ERR(crc->clk)) {
326		dev_err(dev, "Could not get clock\n");
327		return PTR_ERR(crc->clk);
328	}
329
330	ret = clk_prepare_enable(crc->clk);
331	if (ret) {
332		dev_err(crc->dev, "Failed to enable clock\n");
333		return ret;
334	}
335
336	pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
337	pm_runtime_use_autosuspend(dev);
338
339	pm_runtime_get_noresume(dev);
340	pm_runtime_set_active(dev);
341	pm_runtime_irq_safe(dev);
342	pm_runtime_enable(dev);
343
344	spin_lock_init(&crc->lock);
345
346	platform_set_drvdata(pdev, crc);
347
348	spin_lock(&crc_list.lock);
349	list_add(&crc->list, &crc_list.dev_list);
350	spin_unlock(&crc_list.lock);
351
352	mutex_lock(&refcnt_lock);
353	if (!refcnt) {
354		ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
355		if (ret) {
356			mutex_unlock(&refcnt_lock);
357			dev_err(dev, "Failed to register\n");
358			clk_disable_unprepare(crc->clk);
359			return ret;
360		}
361	}
362	refcnt++;
363	mutex_unlock(&refcnt_lock);
364
365	dev_info(dev, "Initialized\n");
366
367	pm_runtime_put_sync(dev);
368
369	return 0;
370}
371
372static int stm32_crc_remove(struct platform_device *pdev)
373{
374	struct stm32_crc *crc = platform_get_drvdata(pdev);
375	int ret = pm_runtime_get_sync(crc->dev);
376
377	if (ret < 0)
378		return ret;
379
380	spin_lock(&crc_list.lock);
381	list_del(&crc->list);
382	spin_unlock(&crc_list.lock);
383
384	mutex_lock(&refcnt_lock);
385	if (!--refcnt)
386		crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
387	mutex_unlock(&refcnt_lock);
388
389	pm_runtime_disable(crc->dev);
390	pm_runtime_put_noidle(crc->dev);
391
392	clk_disable_unprepare(crc->clk);
393
394	return 0;
395}
396
397static int __maybe_unused stm32_crc_suspend(struct device *dev)
398{
399	struct stm32_crc *crc = dev_get_drvdata(dev);
400	int ret;
401
402	ret = pm_runtime_force_suspend(dev);
403	if (ret)
404		return ret;
405
406	clk_unprepare(crc->clk);
407
408	return 0;
409}
410
411static int __maybe_unused stm32_crc_resume(struct device *dev)
412{
413	struct stm32_crc *crc = dev_get_drvdata(dev);
414	int ret;
415
416	ret = clk_prepare(crc->clk);
417	if (ret) {
418		dev_err(crc->dev, "Failed to prepare clock\n");
419		return ret;
420	}
421
422	return pm_runtime_force_resume(dev);
423}
424
425static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev)
426{
427	struct stm32_crc *crc = dev_get_drvdata(dev);
428
429	clk_disable(crc->clk);
430
431	return 0;
432}
433
434static int __maybe_unused stm32_crc_runtime_resume(struct device *dev)
435{
436	struct stm32_crc *crc = dev_get_drvdata(dev);
437	int ret;
438
439	ret = clk_enable(crc->clk);
440	if (ret) {
441		dev_err(crc->dev, "Failed to enable clock\n");
442		return ret;
443	}
444
445	return 0;
446}
447
448static const struct dev_pm_ops stm32_crc_pm_ops = {
449	SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend,
450				stm32_crc_resume)
451	SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
452			   stm32_crc_runtime_resume, NULL)
453};
454
455static const struct of_device_id stm32_dt_ids[] = {
456	{ .compatible = "st,stm32f7-crc", },
457	{},
458};
459MODULE_DEVICE_TABLE(of, stm32_dt_ids);
460
461static struct platform_driver stm32_crc_driver = {
462	.probe  = stm32_crc_probe,
463	.remove = stm32_crc_remove,
464	.driver = {
465		.name           = DRIVER_NAME,
466		.pm		= &stm32_crc_pm_ops,
467		.of_match_table = stm32_dt_ids,
468	},
469};
470
471module_platform_driver(stm32_crc_driver);
472
473MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
474MODULE_DESCRIPTION("STMicroelectronics STM32 CRC32 hardware driver");
475MODULE_LICENSE("GPL");
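
The two shash algorithms registered above ("crc32" and "crc32c", both at priority 200) are consumed through the regular kernel crypto API rather than called directly. Below is a minimal sketch of such a caller; the helper name and buffer are hypothetical and error handling is reduced to the essentials. The digest is stored little-endian (see put_unaligned_le32() in stm32_crc_final()), and a non-default seed may be set beforehand with crypto_shash_setkey() using a 4-byte little-endian key, as stm32_crc_setkey() expects.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical helper: compute a CRC32 over buf/len through whatever "crc32"
 * implementation currently has the highest priority (this driver registers
 * at priority 200). Allocating "crc32c" instead selects the Castagnoli
 * variant in the same way.
 */
static int example_crc32(const u8 *buf, unsigned int len, u32 *result)
{
	struct crypto_shash *tfm;
	u8 digest[4];	/* CHKSUM_DIGEST_SIZE */
	int ret;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		ret = crypto_shash_digest(desc, buf, len, digest);
	}

	if (!ret)
		*result = get_unaligned_le32(digest);	/* digest is little-endian */

	crypto_free_shash(tfm);
	return ret;
}
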
v5.4: drivers/crypto/stm32/stm32-crc32.c
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) STMicroelectronics SA 2017
  4 * Author: Fabien Dessenne <fabien.dessenne@st.com>
  5 */
  6
  7#include <linux/bitrev.h>
  8#include <linux/clk.h>
  9#include <linux/crc32poly.h>
 10#include <linux/module.h>
 11#include <linux/mod_devicetable.h>
 12#include <linux/platform_device.h>
 13#include <linux/pm_runtime.h>
 14
 15#include <crypto/internal/hash.h>
 16
 17#include <asm/unaligned.h>
 18
 19#define DRIVER_NAME             "stm32-crc32"
 20#define CHKSUM_DIGEST_SIZE      4
 21#define CHKSUM_BLOCK_SIZE       1
 22
 23/* Registers */
 24#define CRC_DR                  0x00000000
 25#define CRC_CR                  0x00000008
 26#define CRC_INIT                0x00000010
 27#define CRC_POL                 0x00000014
 28
 29/* Register values */
 30#define CRC_CR_RESET            BIT(0)
 31#define CRC_CR_REVERSE          (BIT(7) | BIT(6) | BIT(5))
 32#define CRC_INIT_DEFAULT        0xFFFFFFFF
 33
 34#define CRC_AUTOSUSPEND_DELAY	50
 35
 36struct stm32_crc {
 37	struct list_head list;
 38	struct device    *dev;
 39	void __iomem     *regs;
 40	struct clk       *clk;
 41	u8               pending_data[sizeof(u32)];
 42	size_t           nb_pending_bytes;
 43};
 44
 45struct stm32_crc_list {
 46	struct list_head dev_list;
 47	spinlock_t       lock; /* protect dev_list */
 48};
 49
 50static struct stm32_crc_list crc_list = {
 51	.dev_list = LIST_HEAD_INIT(crc_list.dev_list),
 52	.lock     = __SPIN_LOCK_UNLOCKED(crc_list.lock),
 53};
 54
 55struct stm32_crc_ctx {
 56	u32 key;
 57	u32 poly;
 58};
 59
 60struct stm32_crc_desc_ctx {
 61	u32    partial; /* crc32c: partial in first 4 bytes of that struct */
 62	struct stm32_crc *crc;
 63};
 64
 65static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
 66{
 67	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
 68
 69	mctx->key = CRC_INIT_DEFAULT;
 70	mctx->poly = CRC32_POLY_LE;
 71	return 0;
 72}
 73
 74static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
 75{
 76	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);
 77
 78	mctx->key = CRC_INIT_DEFAULT;
 79	mctx->poly = CRC32C_POLY_LE;
 80	return 0;
 81}
 82
 83static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
 84			    unsigned int keylen)
 85{
 86	struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);
 87
 88	if (keylen != sizeof(u32)) {
 89		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 90		return -EINVAL;
 91	}
 92
 93	mctx->key = get_unaligned_le32(key);
 94	return 0;
 95}
 96
 97static int stm32_crc_init(struct shash_desc *desc)
 98{
 99	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
100	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
101	struct stm32_crc *crc;
102
103	spin_lock_bh(&crc_list.lock);
104	list_for_each_entry(crc, &crc_list.dev_list, list) {
105		ctx->crc = crc;
106		break;
107	}
108	spin_unlock_bh(&crc_list.lock);
109
110	pm_runtime_get_sync(ctx->crc->dev);
111
112	/* Reset, set key, poly and configure in bit reverse mode */
113	writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
114	writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
115	writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);
116
117	/* Store partial result */
118	ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
119	ctx->crc->nb_pending_bytes = 0;
120
121	pm_runtime_mark_last_busy(ctx->crc->dev);
122	pm_runtime_put_autosuspend(ctx->crc->dev);
123
124	return 0;
125}
126
127static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
128			    unsigned int length)
129{
130	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
131	struct stm32_crc *crc = ctx->crc;
132	u32 *d32;
133	unsigned int i;
134
135	pm_runtime_get_sync(crc->dev);
136
137	if (unlikely(crc->nb_pending_bytes)) {
138		while (crc->nb_pending_bytes != sizeof(u32) && length) {
139			/* Fill in pending data */
140			crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
141			length--;
142		}
143
144		if (crc->nb_pending_bytes == sizeof(u32)) {
145			/* Process completed pending data */
146			writel_relaxed(*(u32 *)crc->pending_data,
147				       crc->regs + CRC_DR);
148			crc->nb_pending_bytes = 0;
149		}
150	}
151
152	d32 = (u32 *)d8;
153	for (i = 0; i < length >> 2; i++)
154		/* Process 32 bits data */
155		writel_relaxed(*(d32++), crc->regs + CRC_DR);
156
157	/* Store partial result */
158	ctx->partial = readl_relaxed(crc->regs + CRC_DR);
159
160	pm_runtime_mark_last_busy(crc->dev);
161	pm_runtime_put_autosuspend(crc->dev);
162
163	/* Check for pending data (non 32 bits) */
164	length &= 3;
165	if (likely(!length))
166		return 0;
167
168	if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
169		/* Shall not happen */
170		dev_err(crc->dev, "Pending data overflow\n");
171		return -EINVAL;
172	}
173
174	d8 = (const u8 *)d32;
175	for (i = 0; i < length; i++)
176		/* Store pending data */
177		crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
178
179	return 0;
180}
181
182static int stm32_crc_final(struct shash_desc *desc, u8 *out)
183{
184	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
185	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
186
187	/* Send computed CRC */
188	put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
189			   ~ctx->partial : ctx->partial, out);
190
191	return 0;
192}
193
194static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
195			   unsigned int length, u8 *out)
196{
197	return stm32_crc_update(desc, data, length) ?:
198	       stm32_crc_final(desc, out);
199}
200
201static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
202			    unsigned int length, u8 *out)
203{
204	return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
205}
206
207static struct shash_alg algs[] = {
208	/* CRC-32 */
209	{
210		.setkey         = stm32_crc_setkey,
211		.init           = stm32_crc_init,
212		.update         = stm32_crc_update,
213		.final          = stm32_crc_final,
214		.finup          = stm32_crc_finup,
215		.digest         = stm32_crc_digest,
216		.descsize       = sizeof(struct stm32_crc_desc_ctx),
217		.digestsize     = CHKSUM_DIGEST_SIZE,
218		.base           = {
219			.cra_name               = "crc32",
220			.cra_driver_name        = DRIVER_NAME,
221			.cra_priority           = 200,
222			.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
223			.cra_blocksize          = CHKSUM_BLOCK_SIZE,
224			.cra_alignmask          = 3,
225			.cra_ctxsize            = sizeof(struct stm32_crc_ctx),
226			.cra_module             = THIS_MODULE,
227			.cra_init               = stm32_crc32_cra_init,
228		}
229	},
230	/* CRC-32 Castagnoli */
231	{
232		.setkey         = stm32_crc_setkey,
233		.init           = stm32_crc_init,
234		.update         = stm32_crc_update,
235		.final          = stm32_crc_final,
236		.finup          = stm32_crc_finup,
237		.digest         = stm32_crc_digest,
238		.descsize       = sizeof(struct stm32_crc_desc_ctx),
239		.digestsize     = CHKSUM_DIGEST_SIZE,
240		.base           = {
241			.cra_name               = "crc32c",
242			.cra_driver_name        = DRIVER_NAME,
243			.cra_priority           = 200,
244			.cra_flags		= CRYPTO_ALG_OPTIONAL_KEY,
245			.cra_blocksize          = CHKSUM_BLOCK_SIZE,
246			.cra_alignmask          = 3,
247			.cra_ctxsize            = sizeof(struct stm32_crc_ctx),
248			.cra_module             = THIS_MODULE,
249			.cra_init               = stm32_crc32c_cra_init,
250		}
251	}
252};
253
254static int stm32_crc_probe(struct platform_device *pdev)
255{
256	struct device *dev = &pdev->dev;
257	struct stm32_crc *crc;
258	int ret;
259
260	crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
261	if (!crc)
262		return -ENOMEM;
263
264	crc->dev = dev;
265
266	crc->regs = devm_platform_ioremap_resource(pdev, 0);
267	if (IS_ERR(crc->regs)) {
268		dev_err(dev, "Cannot map CRC IO\n");
269		return PTR_ERR(crc->regs);
270	}
271
272	crc->clk = devm_clk_get(dev, NULL);
273	if (IS_ERR(crc->clk)) {
274		dev_err(dev, "Could not get clock\n");
275		return PTR_ERR(crc->clk);
276	}
277
278	ret = clk_prepare_enable(crc->clk);
279	if (ret) {
280		dev_err(crc->dev, "Failed to enable clock\n");
281		return ret;
282	}
283
284	pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
285	pm_runtime_use_autosuspend(dev);
286
287	pm_runtime_get_noresume(dev);
288	pm_runtime_set_active(dev);
289	pm_runtime_enable(dev);
290
291	platform_set_drvdata(pdev, crc);
292
293	spin_lock(&crc_list.lock);
294	list_add(&crc->list, &crc_list.dev_list);
295	spin_unlock(&crc_list.lock);
296
297	ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
298	if (ret) {
299		dev_err(dev, "Failed to register\n");
300		clk_disable_unprepare(crc->clk);
301		return ret;
302	}
303
304	dev_info(dev, "Initialized\n");
305
306	pm_runtime_put_sync(dev);
307
308	return 0;
309}
310
311static int stm32_crc_remove(struct platform_device *pdev)
312{
313	struct stm32_crc *crc = platform_get_drvdata(pdev);
314	int ret = pm_runtime_get_sync(crc->dev);
315
316	if (ret < 0)
317		return ret;
318
319	spin_lock(&crc_list.lock);
320	list_del(&crc->list);
321	spin_unlock(&crc_list.lock);
322
323	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
324
325	pm_runtime_disable(crc->dev);
326	pm_runtime_put_noidle(crc->dev);
327
328	clk_disable_unprepare(crc->clk);
329
330	return 0;
331}
332
333#ifdef CONFIG_PM
334static int stm32_crc_runtime_suspend(struct device *dev)
335{
336	struct stm32_crc *crc = dev_get_drvdata(dev);
337
338	clk_disable_unprepare(crc->clk);
339
340	return 0;
341}
342
343static int stm32_crc_runtime_resume(struct device *dev)
344{
345	struct stm32_crc *crc = dev_get_drvdata(dev);
346	int ret;
347
348	ret = clk_prepare_enable(crc->clk);
349	if (ret) {
350		dev_err(crc->dev, "Failed to prepare_enable clock\n");
351		return ret;
352	}
353
354	return 0;
355}
356#endif
357
358static const struct dev_pm_ops stm32_crc_pm_ops = {
359	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
360				pm_runtime_force_resume)
361	SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
362			   stm32_crc_runtime_resume, NULL)
363};
364
365static const struct of_device_id stm32_dt_ids[] = {
366	{ .compatible = "st,stm32f7-crc", },
367	{},
368};
369MODULE_DEVICE_TABLE(of, stm32_dt_ids);
370
371static struct platform_driver stm32_crc_driver = {
372	.probe  = stm32_crc_probe,
373	.remove = stm32_crc_remove,
374	.driver = {
375		.name           = DRIVER_NAME,
376		.pm		= &stm32_crc_pm_ops,
377		.of_match_table = stm32_dt_ids,
378	},
379};
380
381module_platform_driver(stm32_crc_driver);
382
383MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
384MODULE_DESCRIPTION("STMicroelectronics STM32 CRC32 hardware driver");
385MODULE_LICENSE("GPL");
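
For a quick sanity check of either version, the hardware digests can be compared against the kernel's software CRC helpers, which use the same CRC32_POLY_LE and CRC32C_POLY_LE polynomials that the driver programs (bit-reversed) into CRC_POL. The sketch below is hypothetical: it assumes hw_crc32 and hw_crc32c were read back from the little-endian digests as in the earlier example, and it follows the seed and final-inversion conventions of the init and final callbacks above. Note that the default "crc32" seed differs between the two versions shown, 0 in v5.9 versus 0xFFFFFFFF (CRC_INIT_DEFAULT) in v5.4.

#include <linux/bug.h>
#include <linux/crc32.h>	/* crc32_le(), __crc32c_le() */
#include <linux/types.h>

/* Hypothetical cross-check against the software CRC implementations. */
static void example_crc_check(const u8 *buf, size_t len,
			      u32 hw_crc32, u32 hw_crc32c)
{
	/* "crc32" in v5.9: seed 0, no final inversion
	 * (see stm32_crc32_cra_init() and stm32_crc_final()).
	 */
	WARN_ON(hw_crc32 != crc32_le(0, buf, len));

	/* "crc32c": seed 0xFFFFFFFF, result inverted on final
	 * (see stm32_crc32c_cra_init() and stm32_crc_final()).
	 */
	WARN_ON(hw_crc32c != (u32)~__crc32c_le(0xFFFFFFFF, buf, len));
}
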