v5.9
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 */

#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>

struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};

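/*
 * Note on the design (an editorial summary of the code below): the C7 PHE
 * computes a digest only over a complete message, with the microcode
 * appending the final padding itself, so init/update/export/import are
 * delegated to a software fallback shash and only the finup/final step is
 * handed to the hardware.
 */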
static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	return crypto_shash_import(&dctx->fallback, in);
}

static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}
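
/*
 * The xsha instructions leave the hash state as little-endian (CPU-order)
 * 32-bit words; swab32() converts each word to the big-endian byte order
 * of the final SHA-1/SHA-256 digest.
 */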

static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
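	/* PTR_ALIGN() rounds &buf[0] up to the next PADLOCK_ALIGNMENT
	 * boundary; the over-allocation above guarantees that a full 128
	 * aligned bytes remain for the microcode to use. */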
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int err;

	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
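	/*
	 * e.g. state.count == 70 bytes hashed so far gives
	 * leftover == ((70 - 1) & 63) + 1 == 6 and space == 58, while a
	 * count that is an exact multiple of 64 gives leftover == 64 and
	 * space == 0 (the buffered block is already full).
	 */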
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

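	/*
	 * "rep xsha1" register contract, as used here: ECX holds the total
	 * message length in bytes, EAX the number of bytes already hashed
	 * into the exported state, ESI the input and EDI the 128-byte
	 * state/result buffer; the microcode appends the final SHA-1
	 * padding itself.
	 */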
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : \
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
			"S"(in), "D"(result));

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int err;

	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : \
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
			"S"(in), "D"(result));

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

static int padlock_init_tfm(struct crypto_shash *hash)
{
	const char *fallback_driver_name = crypto_shash_alg_name(hash);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);
	struct crypto_shash *fallback_tfm;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		return PTR_ERR(fallback_tfm);
	}

	ctx->fallback = fallback_tfm;
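	/* The fallback keeps its descriptor state at the end of ours, so
	 * grow descsize to reserve room for it in every shash_desc. */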
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;
}

static void padlock_exit_tfm(struct crypto_shash *hash)
{
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(hash);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update		=	padlock_sha_update,
	.finup		=	padlock_sha1_finup,
	.final		=	padlock_sha1_final,
	.export		=	padlock_sha_export,
	.import		=	padlock_sha_import,
	.init_tfm	=	padlock_init_tfm,
	.exit_tfm	=	padlock_exit_tfm,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name		=	"sha1",
		.cra_driver_name	=	"sha1-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA1_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update		=	padlock_sha_update,
	.finup		=	padlock_sha256_finup,
	.final		=	padlock_sha256_final,
	.export		=	padlock_sha_export,
	.import		=	padlock_sha_import,
	.init_tfm	=	padlock_init_tfm,
	.exit_tfm	=	padlock_exit_tfm,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name		=	"sha256",
		.cra_driver_name	=	"sha256-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA256_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
	}
};

/*
 * Two more shash_alg instances for the hardware-implemented multi-part
 * hash supported by the VIA Nano processor.
 */
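/*
 * Unlike the C7 path above, the Nano PHE can hash a stream piecewise:
 * with EAX set to -1 the xsha instructions process exactly ECX complete
 * blocks without appending padding, so no software fallback is needed
 * and the plain sha1/sha256 state lives directly in the descriptor.
 */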
static int padlock_sha1_init_nano(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

static int padlock_sha1_update_nano(struct shash_desc *desc,
			const u8 *data, unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the output buffer to be 128 bytes long and
	 * 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);

	if ((partial + len) >= SHA1_BLOCK_SIZE) {

		/* Complete the partially filled block in the state buffer
		 * with new data and hash it as one block. */
		if (partial) {
			done = -partial;
			memcpy(sctx->buffer + partial, data,
				done + SHA1_BLOCK_SIZE);
			src = sctx->buffer;
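			/* Hash one complete block: ESI and EDI advance past
			 * the consumed input, ECX counts 64-byte blocks and
			 * EAX == -1 selects the partial (no-padding)
			 * operation. */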
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst) \
			: "a"((long)-1), "c"((unsigned long)1));
			done += SHA1_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining input data, whole blocks at a time */
		if (len - done >= SHA1_BLOCK_SIZE) {
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
	memcpy(sctx->buffer + partial, src, len - done);

	return 0;
}

static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
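	/* e.g. partial == 60 leaves no room for the 8-byte length field in
	 * this block, so padlen == (64+56) - 60 == 60 spills the length
	 * into an extra block. */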
	padlock_sha1_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);

	return 0;
}

static int padlock_sha256_init_nano(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha256_state){
		.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
				SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
	};

	return 0;
}

static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the output buffer to be 128 bytes long and
	 * 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);

	if ((partial + len) >= SHA256_BLOCK_SIZE) {

		/* Complete the partially filled block in the state buffer
		 * with new data and hash it as one block. */
		if (partial) {
			done = -partial;
			memcpy(sctx->buf + partial, data,
				done + SHA256_BLOCK_SIZE);
			src = sctx->buf;
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1), "c"((unsigned long)1));
			done += SHA256_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining input data, whole blocks at a time */
		if (len - done >= SHA256_BLOCK_SIZE) {
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA256_BLOCK_SIZE)));
			done += ((len - done) - (len - done) % SHA256_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
	memcpy(sctx->buf + partial, src, len - done);

	return 0;
}

static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *state =
		(struct sha256_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
	padlock_sha256_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);

	return 0;
}

static int padlock_sha_export_nano(struct shash_desc *desc,
				void *out)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, statesize);
	return 0;
}

static int padlock_sha_import_nano(struct shash_desc *desc,
				const void *in)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, statesize);
	return 0;
}

static struct shash_alg sha1_alg_nano = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	padlock_sha1_init_nano,
	.update		=	padlock_sha1_update_nano,
	.final		=	padlock_sha1_final_nano,
	.export		=	padlock_sha_export_nano,
	.import		=	padlock_sha_import_nano,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name		=	"sha1",
		.cra_driver_name	=	"sha1-padlock-nano",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_blocksize		=	SHA1_BLOCK_SIZE,
		.cra_module		=	THIS_MODULE,
	}
};

static struct shash_alg sha256_alg_nano = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	padlock_sha256_init_nano,
	.update		=	padlock_sha256_update_nano,
	.final		=	padlock_sha256_final_nano,
	.export		=	padlock_sha_export_nano,
	.import		=	padlock_sha_import_nano,
	.descsize	=	sizeof(struct sha256_state),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name		=	"sha256",
		.cra_driver_name	=	"sha256-padlock-nano",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_blocksize		=	SHA256_BLOCK_SIZE,
		.cra_module		=	THIS_MODULE,
	}
};

static const struct x86_cpu_id padlock_sha_ids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_PHE, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids);
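
/* The x86cpu device table lets userspace autoload this module on any CPU
 * whose CPUID advertises the PHE feature flag. */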

static int __init padlock_init(void)
{
	int rc = -ENODEV;
	struct cpuinfo_x86 *c = &cpu_data(0);
	struct shash_alg *sha1;
	struct shash_alg *sha256;

	if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
		return -ENODEV;

	/* Register the Nano-specific implementations when running on a
	 * VIA Nano processor, otherwise register the original ones. */
	if (c->x86_model < 0x0f) {
		sha1 = &sha1_alg;
		sha256 = &sha256_alg;
	} else {
		sha1 = &sha1_alg_nano;
		sha256 = &sha256_alg_nano;
	}

	rc = crypto_register_shash(sha1);
	if (rc)
		goto out;

	rc = crypto_register_shash(sha256);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(sha1);

out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_model >= 0x0f) {
		crypto_unregister_shash(&sha1_alg_nano);
		crypto_unregister_shash(&sha256_alg_nano);
	} else {
		crypto_unregister_shash(&sha1_alg);
		crypto_unregister_shash(&sha256_alg);
	}
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("sha1-padlock");
MODULE_ALIAS_CRYPTO("sha256-padlock");
v3.1
 
/*
 * Cryptographic API.
 *
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2006  Michal Ludvig <michal@logix.cz>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/padlock.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <asm/i387.h>

struct padlock_sha_desc {
	struct shash_desc fallback;
};

struct padlock_sha_ctx {
	struct crypto_shash *fallback;
};

static int padlock_sha_init(struct shash_desc *desc)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

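	/* Run the request on the software fallback, propagating only the
	 * caller's may-sleep flag to it. */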
	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_init(&dctx->fallback);
}

static int padlock_sha_update(struct shash_desc *desc,
			      const u8 *data, unsigned int length)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_update(&dctx->fallback, data, length);
}

static int padlock_sha_export(struct shash_desc *desc, void *out)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);

	return crypto_shash_export(&dctx->fallback, out);
}

static int padlock_sha_import(struct shash_desc *desc, const void *in)
{
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);

	dctx->fallback.tfm = ctx->fallback;
	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	return crypto_shash_import(&dctx->fallback, in);
}

static inline void padlock_output_block(uint32_t *src,
					uint32_t *dst, size_t count)
{
	while (count--)
		*dst++ = swab32(*src++);
}

static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in,
			      unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha1_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA1_BLOCK_SIZE - 1)) + 1;
	space = SHA1_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buffer + leftover, in, count);
			in = state.buffer;
			count += leftover;
			state.count &= ~(SHA1_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA1_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
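	/* (The xsha instructions raise Device-Not-Available when CR0.TS is
	 * set even though they touch no FPU state, so TS is cleared here
	 * and restored afterwards.) */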
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xc8" /* rep xsha1 */
		      : \
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 5);

out:
	return err;
}

static int padlock_sha1_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha1_finup(desc, buf, 0, out);
}

static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in,
				unsigned int count, u8 *out)
{
	/* We can't store directly to *out as it may be unaligned. */
	/* BTW Don't reduce the buffer size below 128 Bytes!
	 *     PadLock microcode needs it that big. */
	char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
	struct sha256_state state;
	unsigned int space;
	unsigned int leftover;
	int ts_state;
	int err;

	dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_shash_export(&dctx->fallback, &state);
	if (err)
		goto out;

	if (state.count + count > ULONG_MAX)
		return crypto_shash_finup(&dctx->fallback, in, count, out);

	leftover = ((state.count - 1) & (SHA256_BLOCK_SIZE - 1)) + 1;
	space = SHA256_BLOCK_SIZE - leftover;
	if (space) {
		if (count > space) {
			err = crypto_shash_update(&dctx->fallback, in, space) ?:
			      crypto_shash_export(&dctx->fallback, &state);
			if (err)
				goto out;
			count -= space;
			in += space;
		} else {
			memcpy(state.buf + leftover, in, count);
			in = state.buf;
			count += leftover;
			state.count &= ~(SHA256_BLOCK_SIZE - 1);
		}
	}

	memcpy(result, &state.state, SHA256_DIGEST_SIZE);

	/* prevent taking the spurious DNA fault with padlock. */
	ts_state = irq_ts_save();
	asm volatile (".byte 0xf3,0x0f,0xa6,0xd0" /* rep xsha256 */
		      : \
		      : "c"((unsigned long)state.count + count), \
			"a"((unsigned long)state.count), \
			"S"(in), "D"(result));
	irq_ts_restore(ts_state);

	padlock_output_block((uint32_t *)result, (uint32_t *)out, 8);

out:
	return err;
}

static int padlock_sha256_final(struct shash_desc *desc, u8 *out)
{
	u8 buf[4];

	return padlock_sha256_finup(desc, buf, 0, out);
}

static int padlock_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_shash *hash = __crypto_shash_cast(tfm);
	const char *fallback_driver_name = tfm->__crt_alg->cra_name;
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *fallback_tfm;
	int err = -ENOMEM;

	/* Allocate a fallback and abort if it failed. */
	fallback_tfm = crypto_alloc_shash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		printk(KERN_WARNING PFX "Fallback driver '%s' could not be loaded!\n",
		       fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	ctx->fallback = fallback_tfm;
	hash->descsize += crypto_shash_descsize(fallback_tfm);
	return 0;

out:
	return err;
}

static void padlock_cra_exit(struct crypto_tfm *tfm)
{
	struct padlock_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->fallback);
}

static struct shash_alg sha1_alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update		=	padlock_sha_update,
	.finup		=	padlock_sha1_finup,
	.final		=	padlock_sha1_final,
	.export		=	padlock_sha_export,
	.import		=	padlock_sha_import,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name		=	"sha1",
		.cra_driver_name	=	"sha1-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA1_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

static struct shash_alg sha256_alg = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	padlock_sha_init,
	.update		=	padlock_sha_update,
	.finup		=	padlock_sha256_finup,
	.final		=	padlock_sha256_final,
	.export		=	padlock_sha_export,
	.import		=	padlock_sha_import,
	.descsize	=	sizeof(struct padlock_sha_desc),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name		=	"sha256",
		.cra_driver_name	=	"sha256-padlock",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		=	SHA256_BLOCK_SIZE,
		.cra_ctxsize		=	sizeof(struct padlock_sha_ctx),
		.cra_module		=	THIS_MODULE,
		.cra_init		=	padlock_cra_init,
		.cra_exit		=	padlock_cra_exit,
	}
};

/*
 * Two more shash_alg instances for the hardware-implemented multi-part
 * hash supported by the VIA Nano processor.
 */
static int padlock_sha1_init_nano(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

static int padlock_sha1_update_nano(struct shash_desc *desc,
			const u8 *data, unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the output buffer to be 128 bytes long and
	 * 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	int ts_state;

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);

	if ((partial + len) >= SHA1_BLOCK_SIZE) {

		/* Complete the partially filled block in the state buffer
		 * with new data and hash it as one block. */
		if (partial) {
			done = -partial;
			memcpy(sctx->buffer + partial, data,
				done + SHA1_BLOCK_SIZE);
			src = sctx->buffer;
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst) \
			: "a"((long)-1), "c"((unsigned long)1));
			irq_ts_restore(ts_state);
			done += SHA1_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining input data, whole blocks at a time */
		if (len - done >= SHA1_BLOCK_SIZE) {
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
	memcpy(sctx->buffer + partial, src, len - done);

	return 0;
}

static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
	padlock_sha1_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);

	return 0;
}

static int padlock_sha256_init_nano(struct shash_desc *desc)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha256_state){
		.state = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, \
				SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7},
	};

	return 0;
}

static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct sha256_state *sctx = shash_desc_ctx(desc);
	unsigned int partial, done;
	const u8 *src;
	/* The PHE requires the output buffer to be 128 bytes long and
	 * 16-byte aligned. */
	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__
		((aligned(STACK_ALIGN)));
	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
	int ts_state;

	partial = sctx->count & 0x3f;
	sctx->count += len;
	done = 0;
	src = data;
	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);

	if ((partial + len) >= SHA256_BLOCK_SIZE) {

		/* Complete the partially filled block in the state buffer
		 * with new data and hash it as one block. */
		if (partial) {
			done = -partial;
			memcpy(sctx->buf + partial, data,
				done + SHA256_BLOCK_SIZE);
			src = sctx->buf;
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1), "c"((unsigned long)1));
			irq_ts_restore(ts_state);
			done += SHA256_BLOCK_SIZE;
			src = data + done;
		}

		/* Process the remaining input data, whole blocks at a time */
		if (len - done >= SHA256_BLOCK_SIZE) {
			ts_state = irq_ts_save();
			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
			: "+S"(src), "+D"(dst)
			: "a"((long)-1),
			"c"((unsigned long)((len - done) / SHA256_BLOCK_SIZE)));
			irq_ts_restore(ts_state);
			done += ((len - done) - (len - done) % SHA256_BLOCK_SIZE);
			src = data + done;
		}
		partial = 0;
	}
	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
	memcpy(sctx->buf + partial, src, len - done);

	return 0;
}

static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out)
{
	struct sha256_state *state =
		(struct sha256_state *)shash_desc_ctx(desc);
	unsigned int partial, padlen;
	__be64 bits;
	static const u8 padding[64] = { 0x80, };

	bits = cpu_to_be64(state->count << 3);

	/* Pad out to 56 mod 64 */
	partial = state->count & 0x3f;
	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
	padlock_sha256_update_nano(desc, padding, padlen);

	/* Append length field bytes */
	padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits));

	/* Swap to output */
	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8);

	return 0;
}

static int padlock_sha_export_nano(struct shash_desc *desc,
				void *out)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, statesize);
	return 0;
}

static int padlock_sha_import_nano(struct shash_desc *desc,
				const void *in)
{
	int statesize = crypto_shash_statesize(desc->tfm);
	void *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, statesize);
	return 0;
}

static struct shash_alg sha1_alg_nano = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	padlock_sha1_init_nano,
	.update		=	padlock_sha1_update_nano,
	.final		=	padlock_sha1_final_nano,
	.export		=	padlock_sha_export_nano,
	.import		=	padlock_sha_import_nano,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name		=	"sha1",
		.cra_driver_name	=	"sha1-padlock-nano",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		=	SHA1_BLOCK_SIZE,
		.cra_module		=	THIS_MODULE,
	}
};

static struct shash_alg sha256_alg_nano = {
	.digestsize	=	SHA256_DIGEST_SIZE,
	.init		=	padlock_sha256_init_nano,
	.update		=	padlock_sha256_update_nano,
	.final		=	padlock_sha256_final_nano,
	.export		=	padlock_sha_export_nano,
	.import		=	padlock_sha_import_nano,
	.descsize	=	sizeof(struct sha256_state),
	.statesize	=	sizeof(struct sha256_state),
	.base		=	{
		.cra_name		=	"sha256",
		.cra_driver_name	=	"sha256-padlock-nano",
		.cra_priority		=	PADLOCK_CRA_PRIORITY,
		.cra_flags		=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		=	SHA256_BLOCK_SIZE,
		.cra_module		=	THIS_MODULE,
	}
};

static int __init padlock_init(void)
{
	int rc = -ENODEV;
	struct cpuinfo_x86 *c = &cpu_data(0);
	struct shash_alg *sha1;
	struct shash_alg *sha256;

	if (!cpu_has_phe) {
		printk(KERN_NOTICE PFX "VIA PadLock Hash Engine not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_phe_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	/* Register the Nano-specific implementations when running on a
	 * VIA Nano processor, otherwise register the original ones. */
	if (c->x86_model < 0x0f) {
		sha1 = &sha1_alg;
		sha256 = &sha256_alg;
	} else {
		sha1 = &sha1_alg_nano;
		sha256 = &sha256_alg_nano;
	}

	rc = crypto_register_shash(sha1);
	if (rc)
		goto out;

	rc = crypto_register_shash(sha256);
	if (rc)
		goto out_unreg1;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for SHA1/SHA256 algorithms.\n");

	return 0;

out_unreg1:
	crypto_unregister_shash(sha1);

out:
	printk(KERN_ERR PFX "VIA PadLock SHA1/SHA256 initialization failed.\n");
	return rc;
}

static void __exit padlock_fini(void)
{
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_model >= 0x0f) {
		crypto_unregister_shash(&sha1_alg_nano);
		crypto_unregister_shash(&sha256_alg_nano);
	} else {
		crypto_unregister_shash(&sha1_alg);
		crypto_unregister_shash(&sha256_alg);
	}
}

module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS("sha1-all");
MODULE_ALIAS("sha256-all");
MODULE_ALIAS("sha1-padlock");
MODULE_ALIAS("sha256-padlock");